Serhiy Stetskovych committed on
Commit
2ccf6b5
1 Parent(s): 78111f8

Initial commit

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +2 -0
  2. __init__.py +0 -0
  3. app.py +217 -0
  4. checkpoint_epoch=499.ckpt +3 -0
  5. g_00120000 +3 -0
  6. hifigan/LICENSE +21 -0
  7. hifigan/README.md +101 -0
  8. hifigan/__init__.py +0 -0
  9. hifigan/config.py +28 -0
  10. hifigan/denoiser.py +64 -0
  11. hifigan/env.py +17 -0
  12. hifigan/meldataset.py +217 -0
  13. hifigan/models.py +368 -0
  14. hifigan/xutils.py +60 -0
  15. pflow/__init__.py +0 -0
  16. pflow/data/__init__.py +0 -0
  17. pflow/data/components/__init__.py +0 -0
  18. pflow/data/text_mel_datamodule.py +256 -0
  19. pflow/models/__init__.py +0 -0
  20. pflow/models/baselightningmodule.py +247 -0
  21. pflow/models/components/__init__.py +0 -0
  22. pflow/models/components/aligner.py +235 -0
  23. pflow/models/components/attentions.py +491 -0
  24. pflow/models/components/commons.py +179 -0
  25. pflow/models/components/decoder.py +444 -0
  26. pflow/models/components/flow_matching.py +148 -0
  27. pflow/models/components/speech_prompt_encoder.py +636 -0
  28. pflow/models/components/speech_prompt_encoder_v0.py +618 -0
  29. pflow/models/components/test.py +6 -0
  30. pflow/models/components/text_encoder.py +425 -0
  31. pflow/models/components/transformer.py +316 -0
  32. pflow/models/components/vits_modules.py +194 -0
  33. pflow/models/components/vits_posterior.py +43 -0
  34. pflow/models/components/vits_wn_decoder.py +79 -0
  35. pflow/models/components/wn_pflow_decoder.py +117 -0
  36. pflow/models/pflow_tts.py +182 -0
  37. pflow/text/__init__.py +53 -0
  38. pflow/text/cleaners.py +19 -0
  39. pflow/text/numbers.py +71 -0
  40. pflow/text/symbols.py +17 -0
  41. pflow/text/textnormalizer.py +198 -0
  42. pflow/utils/__init__.py +5 -0
  43. pflow/utils/audio.py +82 -0
  44. pflow/utils/generate_data_statistics.py +115 -0
  45. pflow/utils/instantiators.py +56 -0
  46. pflow/utils/logging_utils.py +53 -0
  47. pflow/utils/model.py +90 -0
  48. pflow/utils/monotonic_align/__init__.py +19 -0
  49. pflow/utils/monotonic_align/core.pyx +42 -0
  50. pflow/utils/pylogger.py +21 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ g_00120000 filter=lfs diff=lfs merge=lfs -text
37
+ g_05000000 filter=lfs diff=lfs merge=lfs -text
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,217 @@
1
+
2
+ from pathlib import Path
3
+ import torchaudio
4
+ import gradio as gr
5
+
6
+ import numpy as np
7
+
8
+ import torch
9
+
10
+
11
+ from hifigan.config import v1
12
+ from hifigan.denoiser import Denoiser
13
+ from hifigan.env import AttrDict
14
+ from hifigan.models import Generator as HiFiGAN
15
+
16
+
17
+ #from BigVGAN.models import BigVGAN
18
+ #from BigVGAN.env import AttrDict as BigVGANAttrDict
19
+
20
+
21
+ from pflow.models.pflow_tts import pflowTTS
22
+ from pflow.text import text_to_sequence, sequence_to_text
23
+ from pflow.utils.utils import intersperse
24
+ from pflow.data.text_mel_datamodule import mel_spectrogram
25
+ from pflow.utils.model import normalize
26
+
27
+
28
+
29
+ BIGVGAN_CONFIG = {
30
+ "resblock": "1",
31
+ "num_gpus": 0,
32
+ "batch_size": 32,
33
+ "learning_rate": 0.0001,
34
+ "adam_b1": 0.8,
35
+ "adam_b2": 0.99,
36
+ "lr_decay": 0.999,
37
+ "seed": 1234,
38
+
39
+ "upsample_rates": [4,4,2,2,2,2],
40
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
41
+ "upsample_initial_channel": 1536,
42
+ "resblock_kernel_sizes": [3,7,11],
43
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
44
+
45
+ "activation": "snakebeta",
46
+ "snake_logscale": True,
47
+
48
+ "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]],
49
+ "mpd_reshapes": [2, 3, 5, 7, 11],
50
+ "use_spectral_norm": False,
51
+ "discriminator_channel_mult": 1,
52
+
53
+ "segment_size": 8192,
54
+ "num_mels": 80,
55
+ "num_freq": 1025,
56
+ "n_fft": 1024,
57
+ "hop_size": 256,
58
+ "win_size": 1024,
59
+
60
+ "sampling_rate": 22050,
61
+
62
+ "fmin": 0,
63
+ "fmax": 8000,
64
+ "fmax_for_loss": None,
65
+
66
+ "num_workers": 4,
67
+
68
+ "dist_config": {
69
+ "dist_backend": "nccl",
70
+ "dist_url": "tcp://localhost:54321",
71
+ "world_size": 1
72
+ }
73
+ }
74
+
75
+ PFLOW_MODEL_PATH = 'checkpoint_epoch=499.ckpt'
76
+ VOCODER_MODEL_PATH = 'g_00120000'
77
+ VOCODER_BIGVGAN_MODEL_PATH = 'g_05000000'
78
+
79
+ wav, sr = torchaudio.load('prompt.wav')
80
+
81
+ prompt = mel_spectrogram(
82
+ wav,
83
+ 1024,
84
+ 80,
85
+ 22050,
86
+ 256,
87
+ 1024,
88
+ 0,
89
+ 8000,
90
+ center=False,
91
+ )[:,:,:264]
92
+
93
+
94
+
95
+ def process_text(text: str, device: torch.device):
96
+ x = torch.tensor(
97
+ intersperse(text_to_sequence(text, ["ukr_cleaners"]), 0),
98
+ dtype=torch.long,
99
+ device=device,
100
+ )[None]
101
+ x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device)
102
+ x_phones = sequence_to_text(x.squeeze(0).tolist())
103
+ return {"x_orig": text, "x": x, "x_lengths": x_lengths, 'x_phones':x_phones}
104
+
105
+
106
+
107
+
108
+ def load_hifigan(checkpoint_path, device):
109
+ h = AttrDict(v1)
110
+ hifigan = HiFiGAN(h).to(device)
111
+ hifigan.load_state_dict(torch.load(checkpoint_path, map_location=device)["generator"])
112
+ _ = hifigan.eval()
113
+ hifigan.remove_weight_norm()
114
+ return hifigan
115
+
116
+
117
+ def load_bigvgan(checkpoint_path, device):
118
+ print("Loading '{}'".format(checkpoint_path))
119
+ checkpoint_dict = torch.load(checkpoint_path, map_location=device)
120
+
121
+
122
+ h = BigVGANAttrDict(BIGVGAN_CONFIG)
123
+ torch.manual_seed(h.seed)
124
+
125
+ generator = BigVGAN(h).to(device)
126
+ generator.load_state_dict(checkpoint_dict['generator'])
127
+ generator.eval()
128
+ generator.remove_weight_norm()
129
+ return generator
130
+
131
+
132
+ def to_waveform(mel, vocoder, denoiser=None):
133
+ audio = vocoder(mel).clamp(-1, 1)
134
+ if denoiser is not None:
135
+ audio = denoiser(audio.squeeze(), strength=0.00025).cpu().squeeze()
136
+
137
+ return audio.cpu().squeeze()
138
+
139
+
140
+
141
+
142
+
143
+
144
+ def get_device():
145
+ if torch.cuda.is_available():
146
+ print("[+] GPU Available! Using GPU")
147
+ device = torch.device("cuda")
148
+ else:
149
+ print("[-] GPU not available or forced CPU run! Using CPU")
150
+ device = torch.device("cpu")
151
+ return device
152
+
153
+
154
+ device = get_device()
155
+ model = pflowTTS.load_from_checkpoint(PFLOW_MODEL_PATH, map_location=device)
156
+ _ = model.eval()
157
+ #vocoder = load_bigvgan(VOCODER_BIGVGAN_MODEL_PATH, device)
158
+ vocoder = load_hifigan(VOCODER_MODEL_PATH, device)
159
+ denoiser = Denoiser(vocoder, mode="zeros")
160
+
161
+ @torch.inference_mode()
162
+ def synthesise(text, temperature, speed):
163
+ if len(text) > 1000:
164
+ raise gr.Error("Текст повинен бути коротшим за 1000 символів.")
165
+
166
+ text_processed = process_text(text.strip(), device)
167
+
168
+ output = model.synthesise(
169
+ text_processed["x"],
170
+ text_processed["x_lengths"],
171
+ n_timesteps=40,
172
+ temperature=temperature,
173
+ length_scale=1/speed,
174
+ prompt= normalize(prompt, model.mel_mean, model.mel_std)
175
+ )
176
+ waveform = to_waveform(output["mel"], vocoder, denoiser)
177
+
178
+ return text_processed['x_phones'][1::2], (22050, waveform.numpy())
179
+
180
+
181
+ description = f'''
182
+ # Експериментальна апка для генерації аудіо з тексту.
183
+
184
+ pflow checkpoint {PFLOW_MODEL_PATH}
185
+ vocoder: HIFIGAN(трейнутий на датасеті, з нуля) - {VOCODER_MODEL_PATH}
186
+ '''
187
+
188
+
189
+ if __name__ == "__main__":
190
+ i = gr.Interface(
191
+ fn=synthesise,
192
+ description=description,
193
+ inputs=[
194
+ gr.Text(label='Текст для синтезу:', lines=5, max_lines=10),
195
+ gr.Slider(minimum=0.0, maximum=1.0, label="Температура", value=0.2),
196
+ gr.Slider(minimum=0.6, maximum=2.0, label="Швидкість", value=1.0)
197
+ ],
198
+ outputs=[
199
+ gr.Text(label='Фонемізований текст:', lines=5),
200
+ gr.Audio(
201
+ label="Згенероване аудіо:",
202
+ autoplay=False,
203
+ streaming=False,
204
+ type="numpy",
205
+ )
206
+
207
+ ],
208
+ allow_flagging ='manual',
209
+ flagging_options=[("Якщо дуже погоне аудіо, тисни цю кнопку.", "negative")],
210
+ cache_examples=True,
211
+ title='',
212
+ # description=description,
213
+ # article=article,
214
+ # examples=examples,
215
+ )
216
+ i.queue(max_size=20, default_concurrency_limit=4)
217
+ i.launch(share=False, server_name="0.0.0.0")
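A minimal usage sketch (not part of the commit): app.py only exposes `synthesise` through the Gradio interface, but the same function can be called directly once the module-level objects are loaded. It assumes the checkpoints above (`checkpoint_epoch=499.ckpt`, `g_00120000`) and `prompt.wav` are present locally; `output.wav` is an arbitrary target path.

```python
# Hedged sketch: call app.py's synthesise() outside Gradio and write the result to disk.
# Importing app runs its module-level setup (model, vocoder, denoiser, prompt).
import torch
import torchaudio

from app import synthesise

phones, (sr, audio) = synthesise("Текст для синтезу.", temperature=0.2, speed=1.0)
print(phones)  # phonemised input, as shown in the Gradio textbox

# audio is a 1-D NumPy array; torchaudio.save expects a (channels, time) tensor
torchaudio.save("output.wav", torch.from_numpy(audio).unsqueeze(0), sr)
```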
checkpoint_epoch=499.ckpt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39051170c6c0d9abce47d0073f796912d5ce3854ade8f707cb30333f50160d99
3
+ size 279562867
g_00120000 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f25c6dbc515ed387edd5d2e5683a50510aa33986e8a79273efe1216084f0f078
3
+ size 55824433
hifigan/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Jungil Kong
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
hifigan/README.md ADDED
@@ -0,0 +1,101 @@
1
+ # HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis
2
+
3
+ ### Jungil Kong, Jaehyeon Kim, Jaekyoung Bae
4
+
5
+ In our [paper](https://arxiv.org/abs/2010.05646),
6
+ we proposed HiFi-GAN: a GAN-based model capable of generating high fidelity speech efficiently.<br/>
7
+ We provide our implementation and pretrained models as open source in this repository.
8
+
9
+ **Abstract :**
10
+ Several recent work on speech synthesis have employed generative adversarial networks (GANs) to produce raw waveforms.
11
+ Although such methods improve the sampling efficiency and memory usage,
12
+ their sample quality has not yet reached that of autoregressive and flow-based generative models.
13
+ In this work, we propose HiFi-GAN, which achieves both efficient and high-fidelity speech synthesis.
14
+ As speech audio consists of sinusoidal signals with various periods,
15
+ we demonstrate that modeling periodic patterns of an audio is crucial for enhancing sample quality.
16
+ A subjective human evaluation (mean opinion score, MOS) of a single speaker dataset indicates that our proposed method
17
+ demonstrates similarity to human quality while generating 22.05 kHz high-fidelity audio 167.9 times faster than
18
+ real-time on a single V100 GPU. We further show the generality of HiFi-GAN to the mel-spectrogram inversion of unseen
19
+ speakers and end-to-end speech synthesis. Finally, a small footprint version of HiFi-GAN generates samples 13.4 times
20
+ faster than real-time on CPU with comparable quality to an autoregressive counterpart.
21
+
22
+ Visit our [demo website](https://jik876.github.io/hifi-gan-demo/) for audio samples.
23
+
24
+ ## Pre-requisites
25
+
26
+ 1. Python >= 3.6
27
+ 2. Clone this repository.
28
+ 3. Install python requirements. Please refer [requirements.txt](requirements.txt)
29
+ 4. Download and extract the [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/).
30
+ And move all wav files to `LJSpeech-1.1/wavs`
31
+
32
+ ## Training
33
+
34
+ ```
35
+ python train.py --config config_v1.json
36
+ ```
37
+
38
+ To train V2 or V3 Generator, replace `config_v1.json` with `config_v2.json` or `config_v3.json`.<br>
39
+ Checkpoints and copy of the configuration file are saved in `cp_hifigan` directory by default.<br>
40
+ You can change the path by adding `--checkpoint_path` option.
41
+
42
+ Validation loss during training with V1 generator.<br>
43
+ ![validation loss](./validation_loss.png)
44
+
45
+ ## Pretrained Model
46
+
47
+ You can also use pretrained models we provide.<br/>
48
+ [Download pretrained models](https://drive.google.com/drive/folders/1-eEYTB5Av9jNql0WGBlRoi-WH2J7bp5Y?usp=sharing)<br/>
49
+ Details of each folder are as follows:
50
+
51
+ | Folder Name | Generator | Dataset | Fine-Tuned |
52
+ | ------------ | --------- | --------- | ------------------------------------------------------ |
53
+ | LJ_V1 | V1 | LJSpeech | No |
54
+ | LJ_V2 | V2 | LJSpeech | No |
55
+ | LJ_V3 | V3 | LJSpeech | No |
56
+ | LJ_FT_T2_V1 | V1 | LJSpeech | Yes ([Tacotron2](https://github.com/NVIDIA/tacotron2)) |
57
+ | LJ_FT_T2_V2 | V2 | LJSpeech | Yes ([Tacotron2](https://github.com/NVIDIA/tacotron2)) |
58
+ | LJ_FT_T2_V3 | V3 | LJSpeech | Yes ([Tacotron2](https://github.com/NVIDIA/tacotron2)) |
59
+ | VCTK_V1 | V1 | VCTK | No |
60
+ | VCTK_V2 | V2 | VCTK | No |
61
+ | VCTK_V3 | V3 | VCTK | No |
62
+ | UNIVERSAL_V1 | V1 | Universal | No |
63
+
64
+ We provide the universal model with discriminator weights that can be used as a base for transfer learning to other datasets.
65
+
66
+ ## Fine-Tuning
67
+
68
+ 1. Generate mel-spectrograms in numpy format using [Tacotron2](https://github.com/NVIDIA/tacotron2) with teacher-forcing.<br/>
69
+ The file name of the generated mel-spectrogram should match the audio file and the extension should be `.npy`.<br/>
70
+ Example:
71
+ ` Audio File : LJ001-0001.wav
72
+ Mel-Spectrogram File : LJ001-0001.npy`
73
+ 2. Create `ft_dataset` folder and copy the generated mel-spectrogram files into it.<br/>
74
+ 3. Run the following command.
75
+ ```
76
+ python train.py --fine_tuning True --config config_v1.json
77
+ ```
78
+ For other command line options, please refer to the training section.
79
+
80
+ ## Inference from wav file
81
+
82
+ 1. Make `test_files` directory and copy wav files into the directory.
83
+ 2. Run the following command.
84
+ ` python inference.py --checkpoint_file [generator checkpoint file path]`
85
+ Generated wav files are saved in `generated_files` by default.<br>
86
+ You can change the path by adding `--output_dir` option.
87
+
88
+ ## Inference for end-to-end speech synthesis
89
+
90
+ 1. Make `test_mel_files` directory and copy generated mel-spectrogram files into the directory.<br>
91
+ You can generate mel-spectrograms using [Tacotron2](https://github.com/NVIDIA/tacotron2),
92
+ [Glow-TTS](https://github.com/jaywalnut310/glow-tts) and so forth.
93
+ 2. Run the following command.
94
+ ` python inference_e2e.py --checkpoint_file [generator checkpoint file path]`
95
+ Generated wav files are saved in `generated_files_from_mel` by default.<br>
96
+ You can change the path by adding `--output_dir` option.
97
+
98
+ ## Acknowledgements
99
+
100
+ We referred to [WaveGlow](https://github.com/NVIDIA/waveglow), [MelGAN](https://github.com/descriptinc/melgan-neurips)
101
+ and [Tacotron2](https://github.com/NVIDIA/tacotron2) to implement this.
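A sketch of the fine-tuning data layout described in the README above: one `.npy` mel-spectrogram per wav, with the basename matching the audio file. The README prescribes teacher-forced Tacotron2 mels; ground-truth mels from `hifigan.meldataset` are used here only to illustrate the naming and folder convention, and the paths are placeholders.

```python
# Illustrative only: populate ft_dataset/ with one mel per wav, named to match the audio file.
from pathlib import Path

import numpy as np
import torch
from scipy.io.wavfile import read

from hifigan.meldataset import MAX_WAV_VALUE, mel_spectrogram

wav_dir = Path("LJSpeech-1.1/wavs")   # placeholder input directory
out_dir = Path("ft_dataset")
out_dir.mkdir(exist_ok=True)

for wav_path in sorted(wav_dir.glob("*.wav")):
    sr, data = read(wav_path)
    audio = torch.FloatTensor(data / MAX_WAV_VALUE).unsqueeze(0)
    # parameters follow config_v1: n_fft=1024, 80 mels, 22050 Hz, hop 256, win 1024, 0-8000 Hz
    mel = mel_spectrogram(audio, 1024, 80, 22050, 256, 1024, 0, 8000, center=False)
    np.save(out_dir / f"{wav_path.stem}.npy", mel.squeeze(0).numpy())  # LJ001-0001.wav -> LJ001-0001.npy
```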
hifigan/__init__.py ADDED
File without changes
hifigan/config.py ADDED
@@ -0,0 +1,28 @@
1
+ v1 = {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 16,
5
+ "learning_rate": 0.0004,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.999,
9
+ "seed": 1234,
10
+ "upsample_rates": [8, 8, 2, 2],
11
+ "upsample_kernel_sizes": [16, 16, 4, 4],
12
+ "upsample_initial_channel": 512,
13
+ "resblock_kernel_sizes": [3, 7, 11],
14
+ "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
15
+ "resblock_initial_channel": 256,
16
+ "segment_size": 8192,
17
+ "num_mels": 80,
18
+ "num_freq": 1025,
19
+ "n_fft": 1024,
20
+ "hop_size": 256,
21
+ "win_size": 1024,
22
+ "sampling_rate": 22050,
23
+ "fmin": 0,
24
+ "fmax": 8000,
25
+ "fmax_loss": None,
26
+ "num_workers": 4,
27
+ "dist_config": {"dist_backend": "nccl", "dist_url": "tcp://localhost:54321", "world_size": 1},
28
+ }
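For context, this is how the `v1` dict is consumed elsewhere in this commit (see `load_hifigan` in app.py above): it is wrapped in `AttrDict` so its keys become attributes, then passed to the HiFi-GAN `Generator`. `g_00120000` is the generator checkpoint added in this commit.

```python
# Mirrors load_hifigan() from app.py: wrap v1 in AttrDict and build the generator.
import torch

from hifigan.config import v1
from hifigan.env import AttrDict
from hifigan.models import Generator

h = AttrDict(v1)                      # keys like h.upsample_rates become attributes
vocoder = Generator(h)
state = torch.load("g_00120000", map_location="cpu")
vocoder.load_state_dict(state["generator"])
vocoder.eval()
vocoder.remove_weight_norm()
```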
hifigan/denoiser.py ADDED
@@ -0,0 +1,64 @@
1
+ # Code modified from Rafael Valle's implementation https://github.com/NVIDIA/waveglow/blob/5bc2a53e20b3b533362f974cfa1ea0267ae1c2b1/denoiser.py
2
+
3
+ """Waveglow style denoiser can be used to remove the artifacts from the HiFiGAN generated audio."""
4
+ import torch
5
+
6
+
7
+ class Denoiser(torch.nn.Module):
8
+ """Removes model bias from audio produced with waveglow"""
9
+
10
+ def __init__(self, vocoder, filter_length=1024, n_overlap=4, win_length=1024, mode="zeros"):
11
+ super().__init__()
12
+ self.filter_length = filter_length
13
+ self.hop_length = int(filter_length / n_overlap)
14
+ self.win_length = win_length
15
+
16
+ dtype, device = next(vocoder.parameters()).dtype, next(vocoder.parameters()).device
17
+ self.device = device
18
+ if mode == "zeros":
19
+ mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device)
20
+ elif mode == "normal":
21
+ mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device)
22
+ else:
23
+ raise Exception(f"Mode {mode} if not supported")
24
+
25
+ def stft_fn(audio, n_fft, hop_length, win_length, window):
26
+ spec = torch.stft(
27
+ audio,
28
+ n_fft=n_fft,
29
+ hop_length=hop_length,
30
+ win_length=win_length,
31
+ window=window,
32
+ return_complex=True,
33
+ )
34
+ spec = torch.view_as_real(spec)
35
+ return torch.sqrt(spec.pow(2).sum(-1)), torch.atan2(spec[..., -1], spec[..., 0])
36
+
37
+ self.stft = lambda x: stft_fn(
38
+ audio=x,
39
+ n_fft=self.filter_length,
40
+ hop_length=self.hop_length,
41
+ win_length=self.win_length,
42
+ window=torch.hann_window(self.win_length, device=device),
43
+ )
44
+ self.istft = lambda x, y: torch.istft(
45
+ torch.complex(x * torch.cos(y), x * torch.sin(y)),
46
+ n_fft=self.filter_length,
47
+ hop_length=self.hop_length,
48
+ win_length=self.win_length,
49
+ window=torch.hann_window(self.win_length, device=device),
50
+ )
51
+
52
+ with torch.no_grad():
53
+ bias_audio = vocoder(mel_input).float().squeeze(0)
54
+ bias_spec, _ = self.stft(bias_audio)
55
+
56
+ self.register_buffer("bias_spec", bias_spec[:, :, 0][:, :, None])
57
+
58
+ @torch.inference_mode()
59
+ def forward(self, audio, strength=0.0005):
60
+ audio_spec, audio_angles = self.stft(audio)
61
+ audio_spec_denoised = audio_spec - self.bias_spec.to(audio.device) * strength
62
+ audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
63
+ audio_denoised = self.istft(audio_spec_denoised, audio_angles)
64
+ return audio_denoised
hifigan/env.py ADDED
@@ -0,0 +1,17 @@
1
+ """ from https://github.com/jik876/hifi-gan """
2
+
3
+ import os
4
+ import shutil
5
+
6
+
7
+ class AttrDict(dict):
8
+ def __init__(self, *args, **kwargs):
9
+ super().__init__(*args, **kwargs)
10
+ self.__dict__ = self
11
+
12
+
13
+ def build_env(config, config_name, path):
14
+ t_path = os.path.join(path, config_name)
15
+ if config != t_path:
16
+ os.makedirs(path, exist_ok=True)
17
+ shutil.copyfile(config, os.path.join(path, config_name))
hifigan/meldataset.py ADDED
@@ -0,0 +1,217 @@
1
+ """ from https://github.com/jik876/hifi-gan """
2
+
3
+ import math
4
+ import os
5
+ import random
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torch.utils.data
10
+ from librosa.filters import mel as librosa_mel_fn
11
+ from librosa.util import normalize
12
+ from scipy.io.wavfile import read
13
+
14
+ MAX_WAV_VALUE = 32768.0
15
+
16
+
17
+ def load_wav(full_path):
18
+ sampling_rate, data = read(full_path)
19
+ return data, sampling_rate
20
+
21
+
22
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
23
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
24
+
25
+
26
+ def dynamic_range_decompression(x, C=1):
27
+ return np.exp(x) / C
28
+
29
+
30
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
31
+ return torch.log(torch.clamp(x, min=clip_val) * C)
32
+
33
+
34
+ def dynamic_range_decompression_torch(x, C=1):
35
+ return torch.exp(x) / C
36
+
37
+
38
+ def spectral_normalize_torch(magnitudes):
39
+ output = dynamic_range_compression_torch(magnitudes)
40
+ return output
41
+
42
+
43
+ def spectral_de_normalize_torch(magnitudes):
44
+ output = dynamic_range_decompression_torch(magnitudes)
45
+ return output
46
+
47
+
48
+ mel_basis = {}
49
+ hann_window = {}
50
+
51
+
52
+ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
53
+ if torch.min(y) < -1.0:
54
+ print("min value is ", torch.min(y))
55
+ if torch.max(y) > 1.0:
56
+ print("max value is ", torch.max(y))
57
+
58
+ global mel_basis, hann_window # pylint: disable=global-statement
59
+ if str(fmax) + "_" + str(y.device) not in mel_basis:
60
+ mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
61
+ mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
62
+ hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
63
+
64
+ y = torch.nn.functional.pad(
65
+ y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
66
+ )
67
+ y = y.squeeze(1)
68
+
69
+ spec = torch.view_as_real(
70
+ torch.stft(
71
+ y,
72
+ n_fft,
73
+ hop_length=hop_size,
74
+ win_length=win_size,
75
+ window=hann_window[str(y.device)],
76
+ center=center,
77
+ pad_mode="reflect",
78
+ normalized=False,
79
+ onesided=True,
80
+ return_complex=True,
81
+ )
82
+ )
83
+
84
+ spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
85
+
86
+ spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec)
87
+ spec = spectral_normalize_torch(spec)
88
+
89
+ return spec
90
+
91
+
92
+ def get_dataset_filelist(a):
93
+ with open(a.input_training_file, encoding="utf-8") as fi:
94
+ training_files = [
95
+ os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0
96
+ ]
97
+
98
+ with open(a.input_validation_file, encoding="utf-8") as fi:
99
+ validation_files = [
100
+ os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0
101
+ ]
102
+ return training_files, validation_files
103
+
104
+
105
+ class MelDataset(torch.utils.data.Dataset):
106
+ def __init__(
107
+ self,
108
+ training_files,
109
+ segment_size,
110
+ n_fft,
111
+ num_mels,
112
+ hop_size,
113
+ win_size,
114
+ sampling_rate,
115
+ fmin,
116
+ fmax,
117
+ split=True,
118
+ shuffle=True,
119
+ n_cache_reuse=1,
120
+ device=None,
121
+ fmax_loss=None,
122
+ fine_tuning=False,
123
+ base_mels_path=None,
124
+ ):
125
+ self.audio_files = training_files
126
+ random.seed(1234)
127
+ if shuffle:
128
+ random.shuffle(self.audio_files)
129
+ self.segment_size = segment_size
130
+ self.sampling_rate = sampling_rate
131
+ self.split = split
132
+ self.n_fft = n_fft
133
+ self.num_mels = num_mels
134
+ self.hop_size = hop_size
135
+ self.win_size = win_size
136
+ self.fmin = fmin
137
+ self.fmax = fmax
138
+ self.fmax_loss = fmax_loss
139
+ self.cached_wav = None
140
+ self.n_cache_reuse = n_cache_reuse
141
+ self._cache_ref_count = 0
142
+ self.device = device
143
+ self.fine_tuning = fine_tuning
144
+ self.base_mels_path = base_mels_path
145
+
146
+ def __getitem__(self, index):
147
+ filename = self.audio_files[index]
148
+ if self._cache_ref_count == 0:
149
+ audio, sampling_rate = load_wav(filename)
150
+ audio = audio / MAX_WAV_VALUE
151
+ if not self.fine_tuning:
152
+ audio = normalize(audio) * 0.95
153
+ self.cached_wav = audio
154
+ if sampling_rate != self.sampling_rate:
155
+ raise ValueError(f"{sampling_rate} SR doesn't match target {self.sampling_rate} SR")
156
+ self._cache_ref_count = self.n_cache_reuse
157
+ else:
158
+ audio = self.cached_wav
159
+ self._cache_ref_count -= 1
160
+
161
+ audio = torch.FloatTensor(audio)
162
+ audio = audio.unsqueeze(0)
163
+
164
+ if not self.fine_tuning:
165
+ if self.split:
166
+ if audio.size(1) >= self.segment_size:
167
+ max_audio_start = audio.size(1) - self.segment_size
168
+ audio_start = random.randint(0, max_audio_start)
169
+ audio = audio[:, audio_start : audio_start + self.segment_size]
170
+ else:
171
+ audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant")
172
+
173
+ mel = mel_spectrogram(
174
+ audio,
175
+ self.n_fft,
176
+ self.num_mels,
177
+ self.sampling_rate,
178
+ self.hop_size,
179
+ self.win_size,
180
+ self.fmin,
181
+ self.fmax,
182
+ center=False,
183
+ )
184
+ else:
185
+ mel = np.load(os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + ".npy"))
186
+ mel = torch.from_numpy(mel)
187
+
188
+ if len(mel.shape) < 3:
189
+ mel = mel.unsqueeze(0)
190
+
191
+ if self.split:
192
+ frames_per_seg = math.ceil(self.segment_size / self.hop_size)
193
+
194
+ if audio.size(1) >= self.segment_size:
195
+ mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
196
+ mel = mel[:, :, mel_start : mel_start + frames_per_seg]
197
+ audio = audio[:, mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size]
198
+ else:
199
+ mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant")
200
+ audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant")
201
+
202
+ mel_loss = mel_spectrogram(
203
+ audio,
204
+ self.n_fft,
205
+ self.num_mels,
206
+ self.sampling_rate,
207
+ self.hop_size,
208
+ self.win_size,
209
+ self.fmin,
210
+ self.fmax_loss,
211
+ center=False,
212
+ )
213
+
214
+ return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
215
+
216
+ def __len__(self):
217
+ return len(self.audio_files)
hifigan/models.py ADDED
@@ -0,0 +1,368 @@
1
+ """ from https://github.com/jik876/hifi-gan """
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
7
+ from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
8
+
9
+ from .xutils import get_padding, init_weights
10
+
11
+ LRELU_SLOPE = 0.1
12
+
13
+
14
+ class ResBlock1(torch.nn.Module):
15
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
16
+ super().__init__()
17
+ self.h = h
18
+ self.convs1 = nn.ModuleList(
19
+ [
20
+ weight_norm(
21
+ Conv1d(
22
+ channels,
23
+ channels,
24
+ kernel_size,
25
+ 1,
26
+ dilation=dilation[0],
27
+ padding=get_padding(kernel_size, dilation[0]),
28
+ )
29
+ ),
30
+ weight_norm(
31
+ Conv1d(
32
+ channels,
33
+ channels,
34
+ kernel_size,
35
+ 1,
36
+ dilation=dilation[1],
37
+ padding=get_padding(kernel_size, dilation[1]),
38
+ )
39
+ ),
40
+ weight_norm(
41
+ Conv1d(
42
+ channels,
43
+ channels,
44
+ kernel_size,
45
+ 1,
46
+ dilation=dilation[2],
47
+ padding=get_padding(kernel_size, dilation[2]),
48
+ )
49
+ ),
50
+ ]
51
+ )
52
+ self.convs1.apply(init_weights)
53
+
54
+ self.convs2 = nn.ModuleList(
55
+ [
56
+ weight_norm(
57
+ Conv1d(
58
+ channels,
59
+ channels,
60
+ kernel_size,
61
+ 1,
62
+ dilation=1,
63
+ padding=get_padding(kernel_size, 1),
64
+ )
65
+ ),
66
+ weight_norm(
67
+ Conv1d(
68
+ channels,
69
+ channels,
70
+ kernel_size,
71
+ 1,
72
+ dilation=1,
73
+ padding=get_padding(kernel_size, 1),
74
+ )
75
+ ),
76
+ weight_norm(
77
+ Conv1d(
78
+ channels,
79
+ channels,
80
+ kernel_size,
81
+ 1,
82
+ dilation=1,
83
+ padding=get_padding(kernel_size, 1),
84
+ )
85
+ ),
86
+ ]
87
+ )
88
+ self.convs2.apply(init_weights)
89
+
90
+ def forward(self, x):
91
+ for c1, c2 in zip(self.convs1, self.convs2):
92
+ xt = F.leaky_relu(x, LRELU_SLOPE)
93
+ xt = c1(xt)
94
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
95
+ xt = c2(xt)
96
+ x = xt + x
97
+ return x
98
+
99
+ def remove_weight_norm(self):
100
+ for l in self.convs1:
101
+ remove_weight_norm(l)
102
+ for l in self.convs2:
103
+ remove_weight_norm(l)
104
+
105
+
106
+ class ResBlock2(torch.nn.Module):
107
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
108
+ super().__init__()
109
+ self.h = h
110
+ self.convs = nn.ModuleList(
111
+ [
112
+ weight_norm(
113
+ Conv1d(
114
+ channels,
115
+ channels,
116
+ kernel_size,
117
+ 1,
118
+ dilation=dilation[0],
119
+ padding=get_padding(kernel_size, dilation[0]),
120
+ )
121
+ ),
122
+ weight_norm(
123
+ Conv1d(
124
+ channels,
125
+ channels,
126
+ kernel_size,
127
+ 1,
128
+ dilation=dilation[1],
129
+ padding=get_padding(kernel_size, dilation[1]),
130
+ )
131
+ ),
132
+ ]
133
+ )
134
+ self.convs.apply(init_weights)
135
+
136
+ def forward(self, x):
137
+ for c in self.convs:
138
+ xt = F.leaky_relu(x, LRELU_SLOPE)
139
+ xt = c(xt)
140
+ x = xt + x
141
+ return x
142
+
143
+ def remove_weight_norm(self):
144
+ for l in self.convs:
145
+ remove_weight_norm(l)
146
+
147
+
148
+ class Generator(torch.nn.Module):
149
+ def __init__(self, h):
150
+ super().__init__()
151
+ self.h = h
152
+ self.num_kernels = len(h.resblock_kernel_sizes)
153
+ self.num_upsamples = len(h.upsample_rates)
154
+ self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
155
+ resblock = ResBlock1 if h.resblock == "1" else ResBlock2
156
+
157
+ self.ups = nn.ModuleList()
158
+ for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
159
+ self.ups.append(
160
+ weight_norm(
161
+ ConvTranspose1d(
162
+ h.upsample_initial_channel // (2**i),
163
+ h.upsample_initial_channel // (2 ** (i + 1)),
164
+ k,
165
+ u,
166
+ padding=(k - u) // 2,
167
+ )
168
+ )
169
+ )
170
+
171
+ self.resblocks = nn.ModuleList()
172
+ for i in range(len(self.ups)):
173
+ ch = h.upsample_initial_channel // (2 ** (i + 1))
174
+ for _, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
175
+ self.resblocks.append(resblock(h, ch, k, d))
176
+
177
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
178
+ self.ups.apply(init_weights)
179
+ self.conv_post.apply(init_weights)
180
+
181
+ def forward(self, x):
182
+ x = self.conv_pre(x)
183
+ for i in range(self.num_upsamples):
184
+ x = F.leaky_relu(x, LRELU_SLOPE)
185
+ x = self.ups[i](x)
186
+ xs = None
187
+ for j in range(self.num_kernels):
188
+ if xs is None:
189
+ xs = self.resblocks[i * self.num_kernels + j](x)
190
+ else:
191
+ xs += self.resblocks[i * self.num_kernels + j](x)
192
+ x = xs / self.num_kernels
193
+ x = F.leaky_relu(x)
194
+ x = self.conv_post(x)
195
+ x = torch.tanh(x)
196
+
197
+ return x
198
+
199
+ def remove_weight_norm(self):
200
+ print("Removing weight norm...")
201
+ for l in self.ups:
202
+ remove_weight_norm(l)
203
+ for l in self.resblocks:
204
+ l.remove_weight_norm()
205
+ remove_weight_norm(self.conv_pre)
206
+ remove_weight_norm(self.conv_post)
207
+
208
+
209
+ class DiscriminatorP(torch.nn.Module):
210
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
211
+ super().__init__()
212
+ self.period = period
213
+ norm_f = weight_norm if use_spectral_norm is False else spectral_norm
214
+ self.convs = nn.ModuleList(
215
+ [
216
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
217
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
218
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
219
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
220
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
221
+ ]
222
+ )
223
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
224
+
225
+ def forward(self, x):
226
+ fmap = []
227
+
228
+ # 1d to 2d
229
+ b, c, t = x.shape
230
+ if t % self.period != 0: # pad first
231
+ n_pad = self.period - (t % self.period)
232
+ x = F.pad(x, (0, n_pad), "reflect")
233
+ t = t + n_pad
234
+ x = x.view(b, c, t // self.period, self.period)
235
+
236
+ for l in self.convs:
237
+ x = l(x)
238
+ x = F.leaky_relu(x, LRELU_SLOPE)
239
+ fmap.append(x)
240
+ x = self.conv_post(x)
241
+ fmap.append(x)
242
+ x = torch.flatten(x, 1, -1)
243
+
244
+ return x, fmap
245
+
246
+
247
+ class MultiPeriodDiscriminator(torch.nn.Module):
248
+ def __init__(self):
249
+ super().__init__()
250
+ self.discriminators = nn.ModuleList(
251
+ [
252
+ DiscriminatorP(2),
253
+ DiscriminatorP(3),
254
+ DiscriminatorP(5),
255
+ DiscriminatorP(7),
256
+ DiscriminatorP(11),
257
+ ]
258
+ )
259
+
260
+ def forward(self, y, y_hat):
261
+ y_d_rs = []
262
+ y_d_gs = []
263
+ fmap_rs = []
264
+ fmap_gs = []
265
+ for _, d in enumerate(self.discriminators):
266
+ y_d_r, fmap_r = d(y)
267
+ y_d_g, fmap_g = d(y_hat)
268
+ y_d_rs.append(y_d_r)
269
+ fmap_rs.append(fmap_r)
270
+ y_d_gs.append(y_d_g)
271
+ fmap_gs.append(fmap_g)
272
+
273
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
274
+
275
+
276
+ class DiscriminatorS(torch.nn.Module):
277
+ def __init__(self, use_spectral_norm=False):
278
+ super().__init__()
279
+ norm_f = weight_norm if use_spectral_norm is False else spectral_norm
280
+ self.convs = nn.ModuleList(
281
+ [
282
+ norm_f(Conv1d(1, 128, 15, 1, padding=7)),
283
+ norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
284
+ norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
285
+ norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
286
+ norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
287
+ norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
288
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
289
+ ]
290
+ )
291
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
292
+
293
+ def forward(self, x):
294
+ fmap = []
295
+ for l in self.convs:
296
+ x = l(x)
297
+ x = F.leaky_relu(x, LRELU_SLOPE)
298
+ fmap.append(x)
299
+ x = self.conv_post(x)
300
+ fmap.append(x)
301
+ x = torch.flatten(x, 1, -1)
302
+
303
+ return x, fmap
304
+
305
+
306
+ class MultiScaleDiscriminator(torch.nn.Module):
307
+ def __init__(self):
308
+ super().__init__()
309
+ self.discriminators = nn.ModuleList(
310
+ [
311
+ DiscriminatorS(use_spectral_norm=True),
312
+ DiscriminatorS(),
313
+ DiscriminatorS(),
314
+ ]
315
+ )
316
+ self.meanpools = nn.ModuleList([AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)])
317
+
318
+ def forward(self, y, y_hat):
319
+ y_d_rs = []
320
+ y_d_gs = []
321
+ fmap_rs = []
322
+ fmap_gs = []
323
+ for i, d in enumerate(self.discriminators):
324
+ if i != 0:
325
+ y = self.meanpools[i - 1](y)
326
+ y_hat = self.meanpools[i - 1](y_hat)
327
+ y_d_r, fmap_r = d(y)
328
+ y_d_g, fmap_g = d(y_hat)
329
+ y_d_rs.append(y_d_r)
330
+ fmap_rs.append(fmap_r)
331
+ y_d_gs.append(y_d_g)
332
+ fmap_gs.append(fmap_g)
333
+
334
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
335
+
336
+
337
+ def feature_loss(fmap_r, fmap_g):
338
+ loss = 0
339
+ for dr, dg in zip(fmap_r, fmap_g):
340
+ for rl, gl in zip(dr, dg):
341
+ loss += torch.mean(torch.abs(rl - gl))
342
+
343
+ return loss * 2
344
+
345
+
346
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
347
+ loss = 0
348
+ r_losses = []
349
+ g_losses = []
350
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
351
+ r_loss = torch.mean((1 - dr) ** 2)
352
+ g_loss = torch.mean(dg**2)
353
+ loss += r_loss + g_loss
354
+ r_losses.append(r_loss.item())
355
+ g_losses.append(g_loss.item())
356
+
357
+ return loss, r_losses, g_losses
358
+
359
+
360
+ def generator_loss(disc_outputs):
361
+ loss = 0
362
+ gen_losses = []
363
+ for dg in disc_outputs:
364
+ l = torch.mean((1 - dg) ** 2)
365
+ gen_losses.append(l)
366
+ loss += l
367
+
368
+ return loss, gen_losses
hifigan/xutils.py ADDED
@@ -0,0 +1,60 @@
1
+ """ from https://github.com/jik876/hifi-gan """
2
+
3
+ import glob
4
+ import os
5
+
6
+ import matplotlib
7
+ import torch
8
+ from torch.nn.utils import weight_norm
9
+
10
+ matplotlib.use("Agg")
11
+ import matplotlib.pylab as plt
12
+
13
+
14
+ def plot_spectrogram(spectrogram):
15
+ fig, ax = plt.subplots(figsize=(10, 2))
16
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
17
+ plt.colorbar(im, ax=ax)
18
+
19
+ fig.canvas.draw()
20
+ plt.close()
21
+
22
+ return fig
23
+
24
+
25
+ def init_weights(m, mean=0.0, std=0.01):
26
+ classname = m.__class__.__name__
27
+ if classname.find("Conv") != -1:
28
+ m.weight.data.normal_(mean, std)
29
+
30
+
31
+ def apply_weight_norm(m):
32
+ classname = m.__class__.__name__
33
+ if classname.find("Conv") != -1:
34
+ weight_norm(m)
35
+
36
+
37
+ def get_padding(kernel_size, dilation=1):
38
+ return int((kernel_size * dilation - dilation) / 2)
39
+
40
+
41
+ def load_checkpoint(filepath, device):
42
+ assert os.path.isfile(filepath)
43
+ print(f"Loading '{filepath}'")
44
+ checkpoint_dict = torch.load(filepath, map_location=device)
45
+ print("Complete.")
46
+ return checkpoint_dict
47
+
48
+
49
+ def save_checkpoint(filepath, obj):
50
+ print(f"Saving checkpoint to {filepath}")
51
+ torch.save(obj, filepath)
52
+ print("Complete.")
53
+
54
+
55
+ def scan_checkpoint(cp_dir, prefix):
56
+ pattern = os.path.join(cp_dir, prefix + "????????")
57
+ cp_list = glob.glob(pattern)
58
+ if len(cp_list) == 0:
59
+ return None
60
+ return sorted(cp_list)[-1]
pflow/__init__.py ADDED
File without changes
pflow/data/__init__.py ADDED
File without changes
pflow/data/components/__init__.py ADDED
File without changes
pflow/data/text_mel_datamodule.py ADDED
@@ -0,0 +1,256 @@
1
+ import random
2
+ from typing import Any, Dict, Optional
3
+
4
+ import torch
5
+ import torchaudio as ta
6
+ from lightning import LightningDataModule
7
+ from torch.utils.data.dataloader import DataLoader
8
+
9
+ from pflow.text import text_to_sequence
10
+ from pflow.utils.audio import mel_spectrogram
11
+ from pflow.utils.model import fix_len_compatibility, normalize
12
+ from pflow.utils.utils import intersperse
13
+
14
+
15
+ def parse_filelist(filelist_path, split_char="|"):
16
+ with open(filelist_path, encoding="utf-8") as f:
17
+ filepaths_and_text = [line.strip().split(split_char) for line in f]
18
+ return filepaths_and_text
19
+
20
+
21
+ class TextMelDataModule(LightningDataModule):
22
+ def __init__( # pylint: disable=unused-argument
23
+ self,
24
+ name,
25
+ train_filelist_path,
26
+ valid_filelist_path,
27
+ batch_size,
28
+ num_workers,
29
+ pin_memory,
30
+ cleaners,
31
+ add_blank,
32
+ n_spks,
33
+ n_fft,
34
+ n_feats,
35
+ sample_rate,
36
+ hop_length,
37
+ win_length,
38
+ f_min,
39
+ f_max,
40
+ data_statistics,
41
+ seed,
42
+ ):
43
+ super().__init__()
44
+
45
+ # this line allows to access init params with 'self.hparams' attribute
46
+ # also ensures init params will be stored in ckpt
47
+ self.save_hyperparameters(logger=False)
48
+
49
+ def setup(self, stage: Optional[str] = None): # pylint: disable=unused-argument
50
+ """Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
51
+
52
+ This method is called by lightning with both `trainer.fit()` and `trainer.test()`, so be
53
+ careful not to execute things like random split twice!
54
+ """
55
+ # load and split datasets only if not loaded already
56
+
57
+ self.trainset = TextMelDataset( # pylint: disable=attribute-defined-outside-init
58
+ self.hparams.train_filelist_path,
59
+ self.hparams.n_spks,
60
+ self.hparams.cleaners,
61
+ self.hparams.add_blank,
62
+ self.hparams.n_fft,
63
+ self.hparams.n_feats,
64
+ self.hparams.sample_rate,
65
+ self.hparams.hop_length,
66
+ self.hparams.win_length,
67
+ self.hparams.f_min,
68
+ self.hparams.f_max,
69
+ self.hparams.data_statistics,
70
+ self.hparams.seed,
71
+ )
72
+ self.validset = TextMelDataset( # pylint: disable=attribute-defined-outside-init
73
+ self.hparams.valid_filelist_path,
74
+ self.hparams.n_spks,
75
+ self.hparams.cleaners,
76
+ self.hparams.add_blank,
77
+ self.hparams.n_fft,
78
+ self.hparams.n_feats,
79
+ self.hparams.sample_rate,
80
+ self.hparams.hop_length,
81
+ self.hparams.win_length,
82
+ self.hparams.f_min,
83
+ self.hparams.f_max,
84
+ self.hparams.data_statistics,
85
+ self.hparams.seed,
86
+ )
87
+
88
+ def train_dataloader(self):
89
+ return DataLoader(
90
+ dataset=self.trainset,
91
+ batch_size=self.hparams.batch_size,
92
+ num_workers=self.hparams.num_workers,
93
+ pin_memory=self.hparams.pin_memory,
94
+ shuffle=True,
95
+ collate_fn=TextMelBatchCollate(self.hparams.n_spks),
96
+ )
97
+
98
+ def val_dataloader(self):
99
+ return DataLoader(
100
+ dataset=self.validset,
101
+ batch_size=self.hparams.batch_size,
102
+ num_workers=self.hparams.num_workers,
103
+ pin_memory=self.hparams.pin_memory,
104
+ shuffle=False,
105
+ collate_fn=TextMelBatchCollate(self.hparams.n_spks),
106
+ )
107
+
108
+ def teardown(self, stage: Optional[str] = None):
109
+ """Clean up after fit or test."""
110
+ pass # pylint: disable=unnecessary-pass
111
+
112
+ def state_dict(self): # pylint: disable=no-self-use
113
+ """Extra things to save to checkpoint."""
114
+ return {}
115
+
116
+ def load_state_dict(self, state_dict: Dict[str, Any]):
117
+ """Things to do when loading checkpoint."""
118
+ pass # pylint: disable=unnecessary-pass
119
+
120
+
121
+ class TextMelDataset(torch.utils.data.Dataset):
122
+ def __init__(
123
+ self,
124
+ filelist_path,
125
+ n_spks,
126
+ cleaners,
127
+ add_blank=True,
128
+ n_fft=1024,
129
+ n_mels=80,
130
+ sample_rate=22050,
131
+ hop_length=256,
132
+ win_length=1024,
133
+ f_min=0.0,
134
+ f_max=8000,
135
+ data_parameters=None,
136
+ seed=None,
137
+ ):
138
+ self.filepaths_and_text = parse_filelist(filelist_path)
139
+ self.n_spks = n_spks
140
+ self.cleaners = cleaners
141
+ self.add_blank = add_blank
142
+ self.n_fft = n_fft
143
+ self.n_mels = n_mels
144
+ self.sample_rate = sample_rate
145
+ self.hop_length = hop_length
146
+ self.win_length = win_length
147
+ self.f_min = f_min
148
+ self.f_max = f_max
149
+ if data_parameters is not None:
150
+ self.data_parameters = data_parameters
151
+ else:
152
+ self.data_parameters = {"mel_mean": 0, "mel_std": 1}
153
+ random.seed(seed)
154
+ random.shuffle(self.filepaths_and_text)
155
+
156
+ def get_datapoint(self, filepath_and_text):
157
+ if self.n_spks > 1:
158
+ filepath, spk, text = (
159
+ filepath_and_text[0],
160
+ int(filepath_and_text[1]),
161
+ filepath_and_text[2],
162
+ )
163
+ else:
164
+ filepath, text = filepath_and_text[0], filepath_and_text[1]
165
+ spk = None
166
+
167
+ text = self.get_text(text, add_blank=self.add_blank)
168
+ mel, audio = self.get_mel(filepath)
169
+ # TODO: make dictionary to get different spec for same speaker
170
+ # right now naively repeating target mel for testing purposes
171
+ return {"x": text, "y": mel, "spk": spk, "wav":audio}
172
+
173
+ def get_mel(self, filepath):
174
+ audio, sr = ta.load(filepath)
175
+ assert sr == self.sample_rate
176
+ mel = mel_spectrogram(
177
+ audio,
178
+ self.n_fft,
179
+ self.n_mels,
180
+ self.sample_rate,
181
+ self.hop_length,
182
+ self.win_length,
183
+ self.f_min,
184
+ self.f_max,
185
+ center=False,
186
+ ).squeeze()
187
+ mel = normalize(mel, self.data_parameters["mel_mean"], self.data_parameters["mel_std"])
188
+ return mel, audio
189
+
190
+ def get_text(self, text, add_blank=True):
191
+ text_norm = text_to_sequence(text, self.cleaners)
192
+ if self.add_blank:
193
+ text_norm = intersperse(text_norm, 0)
194
+ text_norm = torch.IntTensor(text_norm)
195
+ return text_norm
196
+
197
+ def __getitem__(self, index):
198
+ datapoint = self.get_datapoint(self.filepaths_and_text[index])
199
+ if datapoint["wav"].shape[1] <= 66150:
200
+ '''
201
+ skip datapoint if too short (3s)
202
+ TODO To not waste data, we can concatenate wavs less than 3s and use them
203
+ TODO as a hyperparameter; multispeaker dataset can use another wav of same speaker
204
+ '''
205
+ return self.__getitem__(random.randint(0, len(self.filepaths_and_text)-1))
206
+ return datapoint
207
+
208
+ def __len__(self):
209
+ return len(self.filepaths_and_text)
210
+
211
+
212
+ class TextMelBatchCollate:
213
+ def __init__(self, n_spks):
214
+ self.n_spks = n_spks
215
+
216
+ def __call__(self, batch):
217
+ B = len(batch)
218
+ y_max_length = max([item["y"].shape[-1] for item in batch])
219
+ y_max_length = fix_len_compatibility(y_max_length)
220
+ wav_max_length = y_max_length * 256
221
+ x_max_length = max([item["x"].shape[-1] for item in batch])
222
+ n_feats = batch[0]["y"].shape[-2]
223
+
224
+ y = torch.zeros((B, n_feats, y_max_length), dtype=torch.float32)
225
+ x = torch.zeros((B, x_max_length), dtype=torch.long)
226
+ wav = torch.zeros((B, 1, wav_max_length), dtype=torch.float32)
227
+ y_lengths, x_lengths = [], []
228
+ wav_lengths = []
229
+ spks = []
230
+ for i, item in enumerate(batch):
231
+ y_, x_ = item["y"], item["x"]
232
+ wav_ = item["wav"][:,:wav_max_length] if item["wav"].shape[-1] > wav_max_length else item["wav"]
233
+ y_lengths.append(y_.shape[-1])
234
+ x_lengths.append(x_.shape[-1])
235
+ wav_lengths.append(wav_.shape[-1])
236
+ y[i, :, : y_.shape[-1]] = y_
237
+ x[i, : x_.shape[-1]] = x_
238
+ wav[i, :, : wav_.shape[-1]] = wav_
239
+ spks.append(item["spk"])
240
+
241
+ y_lengths = torch.tensor(y_lengths, dtype=torch.long)
242
+ x_lengths = torch.tensor(x_lengths, dtype=torch.long)
243
+ wav_lengths = torch.tensor(wav_lengths, dtype=torch.long)
244
+ spks = torch.tensor(spks, dtype=torch.long) if self.n_spks > 1 else None
245
+
246
+ return {
247
+ "x": x,
248
+ "x_lengths": x_lengths,
249
+ "y": y,
250
+ "y_lengths": y_lengths,
251
+ "spks": spks,
252
+ "wav":wav,
253
+ "wav_lengths":wav_lengths,
254
+ "prompt_spec": y,
255
+ "prompt_lengths": y_lengths,
256
+ }
pflow/models/__init__.py ADDED
File without changes
pflow/models/baselightningmodule.py ADDED
@@ -0,0 +1,247 @@
1
+ """
2
+ This is a base lightning module that can be used to train a model.
3
+ The benefit of this abstraction is that all the logic outside of model definition can be reused for different models.
4
+ """
5
+ import inspect
6
+ from abc import ABC
7
+ from typing import Any, Dict
8
+
9
+ import torch
10
+ from lightning import LightningModule
11
+ from lightning.pytorch.utilities import grad_norm
12
+
13
+ from pflow import utils
14
+ from pflow.utils.utils import plot_tensor
15
+ from pflow.models.components import commons
16
+
17
+ log = utils.get_pylogger(__name__)
18
+
19
+
20
+ class BaseLightningClass(LightningModule, ABC):
21
+ def update_data_statistics(self, data_statistics):
22
+ if data_statistics is None:
23
+ data_statistics = {
24
+ "mel_mean": 0.0,
25
+ "mel_std": 1.0,
26
+ }
27
+
28
+ self.register_buffer("mel_mean", torch.tensor(data_statistics["mel_mean"]))
29
+ self.register_buffer("mel_std", torch.tensor(data_statistics["mel_std"]))
30
+
31
+ def configure_optimizers(self) -> Any:
32
+ optimizer = self.hparams.optimizer(params=self.parameters())
33
+ if self.hparams.scheduler not in (None, {}):
34
+ scheduler_args = {}
35
+ # Manage last epoch for exponential schedulers
36
+ if "last_epoch" in inspect.signature(self.hparams.scheduler.scheduler).parameters:
37
+ if hasattr(self, "ckpt_loaded_epoch"):
38
+ current_epoch = self.ckpt_loaded_epoch - 1
39
+ else:
40
+ current_epoch = -1
41
+
42
+ scheduler_args.update({"optimizer": optimizer})
43
+ scheduler = self.hparams.scheduler.scheduler(**scheduler_args)
44
+ print(self.ckpt_loaded_epoch - 1)
45
+ if hasattr(self, "ckpt_loaded_epoch"):
46
+ scheduler.last_epoch = self.ckpt_loaded_epoch - 1
47
+ else:
48
+ scheduler.last_epoch = -1
49
+ return {
50
+ "optimizer": optimizer,
51
+ "lr_scheduler": {
52
+ "scheduler": scheduler,
53
+ # "interval": self.hparams.scheduler.lightning_args.interval,
54
+ # "frequency": self.hparams.scheduler.lightning_args.frequency,
55
+ # "name": "learning_rate",
56
+ "monitor": "val_loss",
57
+ },
58
+ }
59
+
60
+ return {"optimizer": optimizer}
61
+
62
+ def get_losses(self, batch):
63
+ x, x_lengths = batch["x"], batch["x_lengths"]
64
+ y, y_lengths = batch["y"], batch["y_lengths"]
65
+ # prompt_spec = batch["prompt_spec"]
66
+ # prompt_lengths = batch["prompt_lengths"]
67
+ # prompt_slice, ids_slice = commons.rand_slice_segments(
68
+ # prompt_spec,
69
+ # prompt_lengths,
70
+ # self.prompt_size
71
+ # )
72
+ prompt_slice = None
73
+ dur_loss, prior_loss, diff_loss, attn = self(
74
+ x=x,
75
+ x_lengths=x_lengths,
76
+ y=y,
77
+ y_lengths=y_lengths,
78
+ prompt=prompt_slice,
79
+ )
80
+ return ({
81
+ "dur_loss": dur_loss,
82
+ "prior_loss": prior_loss,
83
+ "diff_loss": diff_loss,
84
+ },
85
+ {
86
+ "attn": attn
87
+ }
88
+ )
89
+
90
+ def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
91
+ self.ckpt_loaded_epoch = checkpoint["epoch"] # pylint: disable=attribute-defined-outside-init
92
+
93
+ def training_step(self, batch: Any, batch_idx: int):
94
+ loss_dict, attn_dict = self.get_losses(batch)
95
+
96
+ self.log(
97
+ "step",
98
+ float(self.global_step),
99
+ on_step=True,
100
+ on_epoch=True,
101
+ logger=True,
102
+ sync_dist=True,
103
+ )
104
+
105
+ self.log(
106
+ "sub_loss/train_dur_loss",
107
+ loss_dict["dur_loss"],
108
+ on_step=True,
109
+ on_epoch=True,
110
+ logger=True,
111
+ sync_dist=True,
112
+ )
113
+ self.log(
114
+ "sub_loss/train_prior_loss",
115
+ loss_dict["prior_loss"],
116
+ on_step=True,
117
+ on_epoch=True,
118
+ logger=True,
119
+ sync_dist=True,
120
+ )
121
+ self.log(
122
+ "sub_loss/train_diff_loss",
123
+ loss_dict["diff_loss"],
124
+ on_step=True,
125
+ on_epoch=True,
126
+ logger=True,
127
+ sync_dist=True,
128
+ )
129
+
130
+ total_loss = sum(loss_dict.values())
131
+ self.log(
132
+ "loss/train",
133
+ total_loss,
134
+ on_step=True,
135
+ on_epoch=True,
136
+ logger=True,
137
+ prog_bar=True,
138
+ sync_dist=True,
139
+ )
140
+ attn = attn_dict["attn"][0]
141
+ self.logger.experiment.add_image(
142
+ f"train/alignment",
143
+ plot_tensor(attn.cpu()),
144
+ self.current_epoch,
145
+ dataformats="HWC",
146
+ )
147
+ return {"loss": total_loss, "log": loss_dict}
148
+
149
+ def validation_step(self, batch: Any, batch_idx: int):
150
+ loss_dict, attn_dict = self.get_losses(batch)
151
+ self.log(
152
+ "sub_loss/val_dur_loss",
153
+ loss_dict["dur_loss"],
154
+ on_step=True,
155
+ on_epoch=True,
156
+ logger=True,
157
+ sync_dist=True,
158
+ )
159
+ self.log(
160
+ "sub_loss/val_prior_loss",
161
+ loss_dict["prior_loss"],
162
+ on_step=True,
163
+ on_epoch=True,
164
+ logger=True,
165
+ sync_dist=True,
166
+ )
167
+ self.log(
168
+ "sub_loss/val_diff_loss",
169
+ loss_dict["diff_loss"],
170
+ on_step=True,
171
+ on_epoch=True,
172
+ logger=True,
173
+ sync_dist=True,
174
+ )
175
+
176
+ total_loss = sum(loss_dict.values())
177
+ self.log(
178
+ "loss/val",
179
+ total_loss,
180
+ on_step=True,
181
+ on_epoch=True,
182
+ logger=True,
183
+ prog_bar=True,
184
+ sync_dist=True,
185
+ )
186
+
187
+ attn = attn_dict["attn"][0]
188
+ self.logger.experiment.add_image(
189
+ f"val/alignment",
190
+ plot_tensor(attn.cpu()),
191
+ self.current_epoch,
192
+ dataformats="HWC",
193
+ )
194
+ return total_loss
195
+
196
+ def on_validation_end(self) -> None:
197
+ if self.trainer.is_global_zero:
198
+ one_batch = next(iter(self.trainer.val_dataloaders))
199
+
200
+ if self.current_epoch == 0:
201
+ log.debug("Plotting original samples")
202
+ for i in range(2):
203
+ y = one_batch["y"][i].unsqueeze(0).to(self.device)
204
+ self.logger.experiment.add_image(
205
+ f"original/{i}",
206
+ plot_tensor(y.squeeze().cpu()),
207
+ self.current_epoch,
208
+ dataformats="HWC",
209
+ )
210
+
211
+ log.debug("Synthesising...")
212
+ for i in range(2):
213
+ x = one_batch["x"][i].unsqueeze(0).to(self.device)
214
+ x_lengths = one_batch["x_lengths"][i].unsqueeze(0).to(self.device)
215
+ y = one_batch["y"][i].unsqueeze(0).to(self.device)
216
+ y_lengths = one_batch["y_lengths"][i].unsqueeze(0).to(self.device)
217
+ # prompt = one_batch["prompt_spec"][i].unsqueeze(0).to(self.device)
218
+ # prompt_lengths = one_batch["prompt_lengths"][i].unsqueeze(0).to(self.device)
219
+ prompt = y
220
+ prompt_lengths = y_lengths
221
+ prompt_slice, ids_slice = commons.rand_slice_segments(
222
+ prompt, prompt_lengths, self.prompt_size
223
+ )
224
+ output = self.synthesise(x[:, :x_lengths], x_lengths, prompt=prompt_slice, n_timesteps=10, guidance_scale=0.0)
225
+ y_enc, y_dec = output["encoder_outputs"], output["decoder_outputs"]
226
+ attn = output["attn"]
227
+ self.logger.experiment.add_image(
228
+ f"generated_enc/{i}",
229
+ plot_tensor(y_enc.squeeze().cpu()),
230
+ self.current_epoch,
231
+ dataformats="HWC",
232
+ )
233
+ self.logger.experiment.add_image(
234
+ f"generated_dec/{i}",
235
+ plot_tensor(y_dec.squeeze().cpu()),
236
+ self.current_epoch,
237
+ dataformats="HWC",
238
+ )
239
+ self.logger.experiment.add_image(
240
+ f"alignment/{i}",
241
+ plot_tensor(attn.squeeze().cpu()),
242
+ self.current_epoch,
243
+ dataformats="HWC",
244
+ )
245
+
246
+ def on_before_optimizer_step(self, optimizer):
247
+ self.log_dict({f"grad_norm/{k}": v for k, v in grad_norm(self, norm_type=2).items()})
pflow/models/components/__init__.py ADDED
File without changes
pflow/models/components/aligner.py ADDED
@@ -0,0 +1,235 @@
1
+ from typing import Tuple
2
+ import numpy as np
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+ from torch.nn import Module
7
+ import torch.nn.functional as F
8
+
9
+ from einops import rearrange, repeat
10
+
11
+ from beartype import beartype
12
+ from beartype.typing import Optional
13
+
14
+ def exists(val):
15
+ return val is not None
16
+
17
+ class AlignerNet(Module):
18
+ """alignment model https://arxiv.org/pdf/2108.10447.pdf """
19
+ def __init__(
20
+ self,
21
+ dim_in=80,
22
+ dim_hidden=512,
23
+ attn_channels=80,
24
+ temperature=0.0005,
25
+ ):
26
+ super().__init__()
27
+ self.temperature = temperature
28
+
29
+ self.key_layers = nn.ModuleList([
30
+ nn.Conv1d(
31
+ dim_hidden,
32
+ dim_hidden * 2,
33
+ kernel_size=3,
34
+ padding=1,
35
+ bias=True,
36
+ ),
37
+ nn.ReLU(inplace=True),
38
+ nn.Conv1d(dim_hidden * 2, attn_channels, kernel_size=1, padding=0, bias=True)
39
+ ])
40
+
41
+ self.query_layers = nn.ModuleList([
42
+ nn.Conv1d(
43
+ dim_in,
44
+ dim_in * 2,
45
+ kernel_size=3,
46
+ padding=1,
47
+ bias=True,
48
+ ),
49
+ nn.ReLU(inplace=True),
50
+ nn.Conv1d(dim_in * 2, dim_in, kernel_size=1, padding=0, bias=True),
51
+ nn.ReLU(inplace=True),
52
+ nn.Conv1d(dim_in, attn_channels, kernel_size=1, padding=0, bias=True)
53
+ ])
54
+
55
+ @beartype
56
+ def forward(
57
+ self,
58
+ queries: Tensor,
59
+ keys: Tensor,
60
+ mask: Optional[Tensor] = None
61
+ ):
62
+ key_out = keys
63
+ for layer in self.key_layers:
64
+ key_out = layer(key_out)
65
+
66
+ query_out = queries
67
+ for layer in self.query_layers:
68
+ query_out = layer(query_out)
69
+
70
+ key_out = rearrange(key_out, 'b c t -> b t c')
71
+ query_out = rearrange(query_out, 'b c t -> b t c')
72
+
73
+ attn_logp = torch.cdist(query_out, key_out)
74
+ attn_logp = rearrange(attn_logp, 'b ... -> b 1 ...')
75
+
76
+ if exists(mask):
77
+ mask = rearrange(mask.bool(), '... c -> ... 1 c')
78
+ attn_logp.data.masked_fill_(~mask, -torch.finfo(attn_logp.dtype).max)
79
+
80
+ attn = attn_logp.softmax(dim = -1)
81
+ return attn, attn_logp
82
+
83
+ def pad_tensor(input, pad, value=0):
84
+ pad = [item for sublist in reversed(pad) for item in sublist] # Flatten the tuple
85
+ assert len(pad) // 2 == len(input.shape), 'Padding dimensions do not match input dimensions'
86
+ return F.pad(input, pad, mode='constant', value=value)
87
+
88
+ def maximum_path(value, mask, const=None):
89
+ device = value.device
90
+ dtype = value.dtype
91
+ if not exists(const):
92
+ const = torch.tensor(float('-inf')).to(device) # Patch for Sphinx complaint
93
+ value = value * mask
94
+
95
+ b, t_x, t_y = value.shape
96
+ direction = torch.zeros(value.shape, dtype=torch.int64, device=device)
97
+ v = torch.zeros((b, t_x), dtype=torch.float32, device=device)
98
+ x_range = torch.arange(t_x, dtype=torch.float32, device=device).view(1, -1)
99
+
100
+ for j in range(t_y):
101
+ v0 = pad_tensor(v, ((0, 0), (1, 0)), value = const)[:, :-1]
102
+ v1 = v
103
+ max_mask = v1 >= v0
104
+ v_max = torch.where(max_mask, v1, v0)
105
+ direction[:, :, j] = max_mask
106
+
107
+ index_mask = x_range <= j
108
+ v = torch.where(index_mask.view(1,-1), v_max + value[:, :, j], const)
109
+
110
+ direction = torch.where(mask.bool(), direction, 1)
111
+
112
+ path = torch.zeros(value.shape, dtype=torch.float32, device=device)
113
+ index = mask[:, :, 0].sum(1).long() - 1
114
+ index_range = torch.arange(b, device=device)
115
+
116
+ for j in reversed(range(t_y)):
117
+ path[index_range, index, j] = 1
118
+ index = index + direction[index_range, index, j] - 1
119
+
120
+ path = path * mask.float()
121
+ path = path.to(dtype=dtype)
122
+ return path
123
+
124
+ class ForwardSumLoss(Module):
125
+ def __init__(
126
+ self,
127
+ blank_logprob = -1
128
+ ):
129
+ super().__init__()
130
+ self.blank_logprob = blank_logprob
131
+
132
+ self.ctc_loss = torch.nn.CTCLoss(
133
+ blank = 0, # check this value
134
+ zero_infinity = True
135
+ )
136
+
137
+ def forward(self, attn_logprob, key_lens, query_lens):
138
+ device, blank_logprob = attn_logprob.device, self.blank_logprob
139
+ max_key_len = attn_logprob.size(-1)
140
+
141
+ # Reorder input to [query_len, batch_size, key_len]
142
+ attn_logprob = rearrange(attn_logprob, 'b 1 c t -> c b t')
143
+
144
+ # Add blank label
145
+ attn_logprob = F.pad(attn_logprob, (1, 0, 0, 0, 0, 0), value = blank_logprob)
146
+
147
+ # Convert to log probabilities
148
+ # Note: Mask out probs beyond key_len
149
+ mask_value = -torch.finfo(attn_logprob.dtype).max
150
+ attn_logprob.masked_fill_(torch.arange(max_key_len + 1, device=device, dtype=torch.long).view(1, 1, -1) > key_lens.view(1, -1, 1), mask_value)
151
+
152
+ attn_logprob = attn_logprob.log_softmax(dim = -1)
153
+
154
+ # Target sequences
155
+ target_seqs = torch.arange(1, max_key_len + 1, device=device, dtype=torch.long)
156
+ target_seqs = repeat(target_seqs, 'n -> b n', b = key_lens.numel())
157
+
158
+ # Evaluate CTC loss
159
+ cost = self.ctc_loss(attn_logprob, target_seqs, query_lens, key_lens)
160
+
161
+ return cost
162
+
163
+ class BinLoss(Module):
164
+ def forward(self, attn_hard, attn_logprob, key_lens):
165
+ batch, device = attn_logprob.shape[0], attn_logprob.device
166
+ max_key_len = attn_logprob.size(-1)
167
+
168
+ # Reorder input to [query_len, batch_size, key_len]
169
+ attn_logprob = rearrange(attn_logprob, 'b 1 c t -> c b t')
170
+ attn_hard = rearrange(attn_hard, 'b t c -> c b t')
171
+
172
+ mask_value = -torch.finfo(attn_logprob.dtype).max
173
+
174
+ attn_logprob.masked_fill_(torch.arange(max_key_len, device=device, dtype=torch.long).view(1, 1, -1) > key_lens.view(1, -1, 1), mask_value)
175
+ attn_logprob = attn_logprob.log_softmax(dim = -1)
176
+
177
+ return (attn_hard * attn_logprob).sum() / batch
178
+
179
+ class Aligner(Module):
180
+ def __init__(
181
+ self,
182
+ dim_in,
183
+ dim_hidden,
184
+ attn_channels=80,
185
+ temperature=0.0005
186
+ ):
187
+ super().__init__()
188
+ self.dim_in = dim_in
189
+ self.dim_hidden = dim_hidden
190
+ self.attn_channels = attn_channels
191
+ self.temperature = temperature
192
+ self.aligner = AlignerNet(
193
+ dim_in = self.dim_in,
194
+ dim_hidden = self.dim_hidden,
195
+ attn_channels = self.attn_channels,
196
+ temperature = self.temperature
197
+ )
198
+
199
+ def forward(
200
+ self,
201
+ x,
202
+ x_mask,
203
+ y,
204
+ y_mask
205
+ ):
206
+ alignment_soft, alignment_logprob = self.aligner(y, rearrange(x, 'b d t -> b t d'), x_mask)
207
+
208
+ x_mask = rearrange(x_mask, '... i -> ... i 1')
209
+ y_mask = rearrange(y_mask, '... j -> ... 1 j')
210
+ attn_mask = x_mask * y_mask
211
+ attn_mask = rearrange(attn_mask, 'b 1 i j -> b i j')
212
+
213
+ alignment_soft = rearrange(alignment_soft, 'b 1 c t -> b t c')
214
+ alignment_mask = maximum_path(alignment_soft, attn_mask)
215
+
216
+ alignment_hard = torch.sum(alignment_mask, -1).int()
217
+ return alignment_hard, alignment_soft, alignment_logprob, alignment_mask
218
+
219
+ if __name__ == '__main__':
220
+ batch_size = 10
221
+ seq_len_y = 200 # length of sequence y
222
+ seq_len_x = 35
223
+ feature_dim = 80 # feature dimension
224
+
225
+ x = torch.randn(batch_size, 512, seq_len_x)
226
+ x = x.transpose(1,2) #dim-1 is the channels for conv
227
+ y = torch.randn(batch_size, seq_len_y, feature_dim)
228
+ y = y.transpose(1,2) #dim-1 is the channels for conv
229
+
230
+ # Create masks
231
+ x_mask = torch.ones(batch_size, 1, seq_len_x)
232
+ y_mask = torch.ones(batch_size, 1, seq_len_y)
233
+
234
+ align = Aligner(dim_in = 80, dim_hidden=512, attn_channels=80)
235
+ alignment_hard, alignment_soft, alignment_logprob, alignment_mask = align(x, x_mask, y, y_mask)
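A hedged usage sketch, not part of this commit, of wiring the demo above into ForwardSumLoss: the soft alignment log-probabilities are scored with CTC against the text positions. The batch size and the full-length key/query lengths are illustrative assumptions.

import torch
from pflow.models.components.aligner import Aligner, ForwardSumLoss

aligner = Aligner(dim_in=80, dim_hidden=512, attn_channels=80)
text_feats = torch.randn(4, 35, 512)       # (batch, t_text, dim_hidden), as in the demo above
mels = torch.randn(4, 80, 200)             # (batch, n_mels, t_mel)
text_mask = torch.ones(4, 1, 35)
mel_mask = torch.ones(4, 1, 200)
hard, soft, logprob, path = aligner(text_feats, text_mask, mels, mel_mask)
ctc_align_loss = ForwardSumLoss()(logprob,
                                  key_lens=torch.full((4,), 35),
                                  query_lens=torch.full((4,), 200))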
pflow/models/components/attentions.py ADDED
@@ -0,0 +1,491 @@
1
+ # from https://github.com/jaywalnut310/vits
2
+ import math
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from pflow.models.components import commons
8
+ from pflow.models.components.vits_modules import LayerNorm  # assumption: LayerNorm is provided by vits_modules in this repo
9
+
10
+
11
+ class Encoder(nn.Module): # backward compatible vits2 encoder
12
+ def __init__(
13
+ self,
14
+ hidden_channels,
15
+ filter_channels,
16
+ n_heads,
17
+ n_layers,
18
+ kernel_size=1,
19
+ p_dropout=0.0,
20
+ window_size=4,
21
+ **kwargs
22
+ ):
23
+ super().__init__()
24
+ self.hidden_channels = hidden_channels
25
+ self.filter_channels = filter_channels
26
+ self.n_heads = n_heads
27
+ self.n_layers = n_layers
28
+ self.kernel_size = kernel_size
29
+ self.p_dropout = p_dropout
30
+ self.window_size = window_size
31
+
32
+ self.drop = nn.Dropout(p_dropout)
33
+ self.attn_layers = nn.ModuleList()
34
+ self.norm_layers_1 = nn.ModuleList()
35
+ self.ffn_layers = nn.ModuleList()
36
+ self.norm_layers_2 = nn.ModuleList()
37
+ # if kwargs has spk_emb_dim, then add a linear layer to project spk_emb_dim to hidden_channels
38
+ self.cond_layer_idx = self.n_layers
39
+ if "gin_channels" in kwargs:
40
+ self.gin_channels = kwargs["gin_channels"]
41
+ if self.gin_channels != 0:
42
+ self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
43
+ # vits2 says 3rd block, so idx is 2 by default
44
+ self.cond_layer_idx = (
45
+ kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
46
+ )
47
+ assert (
48
+ self.cond_layer_idx < self.n_layers
49
+ ), "cond_layer_idx should be less than n_layers"
50
+
51
+ for i in range(self.n_layers):
52
+ self.attn_layers.append(
53
+ MultiHeadAttention(
54
+ hidden_channels,
55
+ hidden_channels,
56
+ n_heads,
57
+ p_dropout=p_dropout,
58
+ window_size=window_size,
59
+ )
60
+ )
61
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
62
+ self.ffn_layers.append(
63
+ FFN(
64
+ hidden_channels,
65
+ hidden_channels,
66
+ filter_channels,
67
+ kernel_size,
68
+ p_dropout=p_dropout,
69
+ )
70
+ )
71
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
72
+
73
+ def forward(self, x, x_mask, g=None):
74
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
75
+ x = x * x_mask
76
+ for i in range(self.n_layers):
77
+ if i == self.cond_layer_idx and g is not None:
78
+ g = self.spk_emb_linear(g.transpose(1, 2))
79
+ g = g.transpose(1, 2)
80
+ x = x + g
81
+ x = x * x_mask
82
+ y = self.attn_layers[i](x, x, attn_mask)
83
+ y = self.drop(y)
84
+ x = self.norm_layers_1[i](x + y)
85
+
86
+ y = self.ffn_layers[i](x, x_mask)
87
+ y = self.drop(y)
88
+ x = self.norm_layers_2[i](x + y)
89
+ x = x * x_mask
90
+ return x
91
+
92
+
93
+ class Decoder(nn.Module):
94
+ def __init__(
95
+ self,
96
+ hidden_channels,
97
+ filter_channels,
98
+ n_heads,
99
+ n_layers,
100
+ kernel_size=1,
101
+ p_dropout=0.0,
102
+ proximal_bias=False,
103
+ proximal_init=True,
104
+ **kwargs
105
+ ):
106
+ super().__init__()
107
+ self.hidden_channels = hidden_channels
108
+ self.filter_channels = filter_channels
109
+ self.n_heads = n_heads
110
+ self.n_layers = n_layers
111
+ self.kernel_size = kernel_size
112
+ self.p_dropout = p_dropout
113
+ self.proximal_bias = proximal_bias
114
+ self.proximal_init = proximal_init
115
+
116
+ self.drop = nn.Dropout(p_dropout)
117
+ self.self_attn_layers = nn.ModuleList()
118
+ self.norm_layers_0 = nn.ModuleList()
119
+ self.encdec_attn_layers = nn.ModuleList()
120
+ self.norm_layers_1 = nn.ModuleList()
121
+ self.ffn_layers = nn.ModuleList()
122
+ self.norm_layers_2 = nn.ModuleList()
123
+ for i in range(self.n_layers):
124
+ self.self_attn_layers.append(
125
+ MultiHeadAttention(
126
+ hidden_channels,
127
+ hidden_channels,
128
+ n_heads,
129
+ p_dropout=p_dropout,
130
+ proximal_bias=proximal_bias,
131
+ proximal_init=proximal_init,
132
+ )
133
+ )
134
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
135
+ self.encdec_attn_layers.append(
136
+ MultiHeadAttention(
137
+ hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
138
+ )
139
+ )
140
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
141
+ self.ffn_layers.append(
142
+ FFN(
143
+ hidden_channels,
144
+ hidden_channels,
145
+ filter_channels,
146
+ kernel_size,
147
+ p_dropout=p_dropout,
148
+ causal=True,
149
+ )
150
+ )
151
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
152
+
153
+ def forward(self, x, x_mask, h, h_mask):
154
+ """
155
+ x: decoder input
156
+ h: encoder output
157
+ """
158
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
159
+ device=x.device, dtype=x.dtype
160
+ )
161
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
162
+ x = x * x_mask
163
+ for i in range(self.n_layers):
164
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
165
+ y = self.drop(y)
166
+ x = self.norm_layers_0[i](x + y)
167
+
168
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
169
+ y = self.drop(y)
170
+ x = self.norm_layers_1[i](x + y)
171
+
172
+ y = self.ffn_layers[i](x, x_mask)
173
+ y = self.drop(y)
174
+ x = self.norm_layers_2[i](x + y)
175
+ x = x * x_mask
176
+ return x
177
+
178
+
179
+ class MultiHeadAttention(nn.Module):
180
+ def __init__(
181
+ self,
182
+ channels,
183
+ out_channels,
184
+ n_heads,
185
+ p_dropout=0.0,
186
+ window_size=None,
187
+ heads_share=True,
188
+ block_length=None,
189
+ proximal_bias=False,
190
+ proximal_init=False,
191
+ ):
192
+ super().__init__()
193
+ assert channels % n_heads == 0
194
+
195
+ self.channels = channels
196
+ self.out_channels = out_channels
197
+ self.n_heads = n_heads
198
+ self.p_dropout = p_dropout
199
+ self.window_size = window_size
200
+ self.heads_share = heads_share
201
+ self.block_length = block_length
202
+ self.proximal_bias = proximal_bias
203
+ self.proximal_init = proximal_init
204
+ self.attn = None
205
+
206
+ self.k_channels = channels // n_heads
207
+ self.conv_q = nn.Conv1d(channels, channels, 1)
208
+ self.conv_k = nn.Conv1d(channels, channels, 1)
209
+ self.conv_v = nn.Conv1d(channels, channels, 1)
210
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
211
+ self.drop = nn.Dropout(p_dropout)
212
+
213
+ if window_size is not None:
214
+ n_heads_rel = 1 if heads_share else n_heads
215
+ rel_stddev = self.k_channels**-0.5
216
+ self.emb_rel_k = nn.Parameter(
217
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
218
+ * rel_stddev
219
+ )
220
+ self.emb_rel_v = nn.Parameter(
221
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
222
+ * rel_stddev
223
+ )
224
+
225
+ nn.init.xavier_uniform_(self.conv_q.weight)
226
+ nn.init.xavier_uniform_(self.conv_k.weight)
227
+ nn.init.xavier_uniform_(self.conv_v.weight)
228
+ if proximal_init:
229
+ with torch.no_grad():
230
+ self.conv_k.weight.copy_(self.conv_q.weight)
231
+ self.conv_k.bias.copy_(self.conv_q.bias)
232
+
233
+ def forward(self, x, c, attn_mask=None):
234
+ q = self.conv_q(x)
235
+ k = self.conv_k(c)
236
+ v = self.conv_v(c)
237
+
238
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
239
+
240
+ x = self.conv_o(x)
241
+ return x
242
+
243
+ def attention(self, query, key, value, mask=None):
244
+ # reshape [b, d, t] -> [b, n_h, d_k, t] (the einsum path below keeps channels before time)
245
+ b, d, t_s, t_t = (*key.size(), query.size(2))
246
+ # query = query.view(
247
+ # b,
248
+ # self.n_heads,
249
+ # self.k_channels,
250
+ # t_t
251
+ # ).transpose(2, 3) #[b,h,t_t,c], d=h*c
252
+ # key = key.view(
253
+ # b,
254
+ # self.n_heads,
255
+ # self.k_channels,
256
+ # t_s
257
+ # ).transpose(2, 3) #[b,h,t_s,c]
258
+ # value = value.view(
259
+ # b,
260
+ # self.n_heads,
261
+ # self.k_channels,
262
+ # t_s
263
+ # ).transpose(2, 3) #[b,h,t_s,c]
264
+ # scores = torch.matmul(
265
+ # query / math.sqrt(self.k_channels), key.transpose(-2, -1)
266
+ # ) #[b,h,t_t,t_s]
267
+ query = query.view(b, self.n_heads, self.k_channels, t_t) # [b,h,c,t_t]
268
+ key = key.view(b, self.n_heads, self.k_channels, t_s) # [b,h,c,t_s]
269
+ value = value.view(b, self.n_heads, self.k_channels, t_s) # [b,h,c,t_s]
270
+ scores = torch.einsum(
271
+ "bhdt,bhds -> bhts", query / math.sqrt(self.k_channels), key
272
+ ) # [b,h,t_t,t_s]
273
+ # if self.window_size is not None:
274
+ # assert t_s == t_t, "Relative attention is only available for self-attention."
275
+ # key_relative_embeddings = self._get_relative_embeddings(
276
+ # self.emb_rel_k, t_s
277
+ # )
278
+ # rel_logits = self._matmul_with_relative_keys(
279
+ # query / math.sqrt(self.k_channels), key_relative_embeddings
280
+ # ) #[b,h,t_t,d],[h or 1,e,d] ->[b,h,t_t,e]
281
+ # scores_local = self._relative_position_to_absolute_position(rel_logits)
282
+ # scores = scores + scores_local
283
+ # if self.proximal_bias:
284
+ # assert t_s == t_t, "Proximal bias is only available for self-attention."
285
+ # scores = scores + \
286
+ # self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
287
+ # if mask is not None:
288
+ # scores = scores.masked_fill(mask == 0, -1e4)
289
+ # if self.block_length is not None:
290
+ # assert t_s == t_t, "Local attention is only available for self-attention."
291
+ # block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
292
+ # scores = scores.masked_fill(block_mask == 0, -1e4)
293
+ # p_attn = F.softmax(scores, dim=-1) # [b, h, t_t, t_s]
294
+ # p_attn = self.drop(p_attn)
295
+ # output = torch.matmul(p_attn, value) # [b,h,t_t,t_s],[b,h,t_s,c] -> [b,h,t_t,c]
296
+ # if self.window_size is not None:
297
+ # relative_weights = self._absolute_position_to_relative_position(p_attn) #[b, h, t_t, 2*t_t-1]
298
+ # value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) #[h or 1, 2*t_t-1, c]
299
+ # output = output + \
300
+ # self._matmul_with_relative_values(
301
+ # relative_weights, value_relative_embeddings) # [b, h, t_t, 2*t_t-1],[h or 1, 2*t_t-1, c] -> [b, h, t_t, c]
302
+ # output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, c] -> [b,h,c,t_t] -> [b, d, t_t]
303
+ if self.window_size is not None:
304
+ assert (
305
+ t_s == t_t
306
+ ), "Relative attention is only available for self-attention."
307
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
308
+ rel_logits = torch.einsum(
309
+ "bhdt,hed->bhte",
310
+ query / math.sqrt(self.k_channels),
311
+ key_relative_embeddings,
312
+ ) # [b,h,c,t_t],[h or 1,e,c] ->[b,h,t_t,e]
313
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
314
+ scores = scores + scores_local
315
+ if self.proximal_bias:
316
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
317
+ scores = scores + self._attention_bias_proximal(t_s).to(
318
+ device=scores.device, dtype=scores.dtype
319
+ )
320
+ if mask is not None:
321
+ scores = scores.masked_fill(mask == 0, -1e4)
322
+ if self.block_length is not None:
323
+ assert (
324
+ t_s == t_t
325
+ ), "Local attention is only available for self-attention."
326
+ block_mask = (
327
+ torch.ones_like(scores)
328
+ .triu(-self.block_length)
329
+ .tril(self.block_length)
330
+ )
331
+ scores = scores.masked_fill(block_mask == 0, -1e4)
332
+ p_attn = F.softmax(scores, dim=-1) # [b, h, t_t, t_s]
333
+ p_attn = self.drop(p_attn)
334
+ output = torch.einsum(
335
+ "bhcs,bhts->bhct", value, p_attn
336
+ ) # [b,h,c,t_s],[b,h,t_t,t_s] -> [b,h,c,t_t]
337
+ if self.window_size is not None:
338
+ relative_weights = self._absolute_position_to_relative_position(
339
+ p_attn
340
+ ) # [b, h, t_t, 2*t_t-1]
341
+ value_relative_embeddings = self._get_relative_embeddings(
342
+ self.emb_rel_v, t_s
343
+ ) # [h or 1, 2*t_t-1, c]
344
+ output = output + torch.einsum(
345
+ "bhte,hec->bhct", relative_weights, value_relative_embeddings
346
+ ) # [b, h, t_t, 2*t_t-1],[h or 1, 2*t_t-1, c] -> [b, h, c, t_t]
347
+ output = output.view(b, d, t_t) # [b, h, c, t_t] -> [b, d, t_t]
348
+ return output, p_attn
349
+
350
+ def _matmul_with_relative_values(self, x, y):
351
+ """
352
+ x: [b, h, l, m]
353
+ y: [h or 1, m, d]
354
+ ret: [b, h, l, d]
355
+ """
356
+ ret = torch.matmul(x, y.unsqueeze(0))
357
+ return ret
358
+
359
+ def _matmul_with_relative_keys(self, x, y):
360
+ """
361
+ x: [b, h, l, d]
362
+ y: [h or 1, m, d]
363
+ ret: [b, h, l, m]
364
+ """
365
+ # ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
366
+ ret = torch.einsum("bhld,hmd -> bhlm", x, y)
367
+ return ret
368
+
369
+ def _get_relative_embeddings(self, relative_embeddings, length):
370
+ max_relative_position = 2 * self.window_size + 1
371
+ # Pad first before slice to avoid using cond ops.
372
+ pad_length = max(length - (self.window_size + 1), 0)
373
+ slice_start_position = max((self.window_size + 1) - length, 0)
374
+ slice_end_position = slice_start_position + 2 * length - 1
375
+ if pad_length > 0:
376
+ padded_relative_embeddings = F.pad(
377
+ relative_embeddings,
378
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
379
+ )
380
+ else:
381
+ padded_relative_embeddings = relative_embeddings
382
+ used_relative_embeddings = padded_relative_embeddings[
383
+ :, slice_start_position:slice_end_position
384
+ ]
385
+ return used_relative_embeddings
386
+
387
+ def _relative_position_to_absolute_position(self, x):
388
+ """
389
+ x: [b, h, l, 2*l-1]
390
+ ret: [b, h, l, l]
391
+ """
392
+ batch, heads, length, _ = x.size()
393
+ # Concat columns of pad to shift from relative to absolute indexing.
394
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
395
+
396
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
397
+ x_flat = x.view([batch, heads, length * 2 * length])
398
+ x_flat = F.pad(
399
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
400
+ )
401
+
402
+ # Reshape and slice out the padded elements.
403
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
404
+ :, :, :length, length - 1 :
405
+ ]
406
+ return x_final
407
+
408
+ def _absolute_position_to_relative_position(self, x):
409
+ """
410
+ x: [b, h, l, l]
411
+ ret: [b, h, l, 2*l-1]
412
+ """
413
+ batch, heads, length, _ = x.size()
414
+ # padd along column
415
+ x = F.pad(
416
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
417
+ )
418
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
419
+ # add 0's in the beginning that will skew the elements after reshape
420
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
421
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
422
+ return x_final
423
+
424
+ def _attention_bias_proximal(self, length):
425
+ """Bias for self-attention to encourage attention to close positions.
426
+ Args:
427
+ length: an integer scalar.
428
+ Returns:
429
+ a Tensor with shape [1, 1, length, length]
430
+ """
431
+ r = torch.arange(length, dtype=torch.float32)
432
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
433
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
434
+
435
+
436
+ class FFN(nn.Module):
437
+ def __init__(
438
+ self,
439
+ in_channels,
440
+ out_channels,
441
+ filter_channels,
442
+ kernel_size,
443
+ p_dropout=0.0,
444
+ activation=None,
445
+ causal=False,
446
+ ):
447
+ super().__init__()
448
+ self.in_channels = in_channels
449
+ self.out_channels = out_channels
450
+ self.filter_channels = filter_channels
451
+ self.kernel_size = kernel_size
452
+ self.p_dropout = p_dropout
453
+ self.activation = activation
454
+ self.causal = causal
455
+
456
+ if causal:
457
+ self.padding = self._causal_padding
458
+ else:
459
+ self.padding = self._same_padding
460
+
461
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
462
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
463
+ self.drop = nn.Dropout(p_dropout)
464
+
465
+ def forward(self, x, x_mask):
466
+ x = self.conv_1(self.padding(x * x_mask))
467
+ if self.activation == "gelu":
468
+ x = x * torch.sigmoid(1.702 * x)
469
+ else:
470
+ x = torch.relu(x)
471
+ x = self.drop(x)
472
+ x = self.conv_2(self.padding(x * x_mask))
473
+ return x * x_mask
474
+
475
+ def _causal_padding(self, x):
476
+ if self.kernel_size == 1:
477
+ return x
478
+ pad_l = self.kernel_size - 1
479
+ pad_r = 0
480
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
481
+ x = F.pad(x, commons.convert_pad_shape(padding))
482
+ return x
483
+
484
+ def _same_padding(self, x):
485
+ if self.kernel_size == 1:
486
+ return x
487
+ pad_l = (self.kernel_size - 1) // 2
488
+ pad_r = self.kernel_size // 2
489
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
490
+ x = F.pad(x, commons.convert_pad_shape(padding))
491
+ return x
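A hedged sketch, not part of this commit, of running the relative-position Encoder above over a padded batch; the channel sizes are illustrative, and the mask is built with commons.sequence_mask from the next file. It assumes the LayerNorm import at the top of this file resolves (see the note there).

import torch
from pflow.models.components.attentions import Encoder
from pflow.models.components import commons

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1, window_size=4)
x = torch.randn(2, 192, 120)                                            # (batch, hidden_channels, frames)
lengths = torch.tensor([120, 95])
x_mask = commons.sequence_mask(lengths, 120).unsqueeze(1).to(x.dtype)   # (batch, 1, frames)
out = enc(x, x_mask)                                                    # (batch, hidden_channels, frames)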
pflow/models/components/commons.py ADDED
@@ -0,0 +1,179 @@
1
+ # from https://github.com/jaywalnut310/vits
2
+ import math
3
+ import torch
4
+ from torch.nn import functional as F
5
+
6
+
7
+ def init_weights(m, mean=0.0, std=0.01):
8
+ classname = m.__class__.__name__
9
+ if classname.find("Conv") != -1:
10
+ m.weight.data.normal_(mean, std)
11
+
12
+
13
+ def get_padding(kernel_size, dilation=1):
14
+ return int((kernel_size * dilation - dilation) / 2)
15
+
16
+
17
+ def convert_pad_shape(pad_shape):
18
+ l = pad_shape[::-1]
19
+ pad_shape = [item for sublist in l for item in sublist]
20
+ return pad_shape
21
+
22
+
23
+ def intersperse(lst, item):
24
+ result = [item] * (len(lst) * 2 + 1)
25
+ result[1::2] = lst
26
+ return result
27
+
28
+
29
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
30
+ """KL(P||Q)"""
31
+ kl = (logs_q - logs_p) - 0.5
32
+ kl += (
33
+ 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
34
+ )
35
+ return kl
36
+
37
+
38
+ def rand_gumbel(shape):
39
+ """Sample from the Gumbel distribution, protect from overflows."""
40
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
41
+ return -torch.log(-torch.log(uniform_samples))
42
+
43
+
44
+ def rand_gumbel_like(x):
45
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
46
+ return g
47
+
48
+
49
+ def slice_segments(x, ids_str, segment_size=4):
50
+ ret = torch.zeros_like(x[:, :, :segment_size])
51
+ for i in range(x.size(0)):
52
+ idx_str = ids_str[i]
53
+ idx_end = idx_str + segment_size
54
+ ret[i] = x[i, :, idx_str:idx_end]
55
+ return ret
56
+
57
+
58
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
59
+ b, d, t = x.size()
60
+ if x_lengths is None:
61
+ x_lengths = t
62
+ ids_str_max = x_lengths - segment_size + 1
63
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
64
+ ids_str = torch.max(torch.zeros(ids_str.size()).to(ids_str.device), ids_str).to(
65
+ dtype=torch.long
66
+ )
67
+ ret = slice_segments(x, ids_str, segment_size)
68
+ return ret, ids_str
69
+
70
+
71
+ def rand_slice_segments_for_cat(x, x_lengths=None, segment_size=4):
72
+ b, d, t = x.size()
73
+ if x_lengths is None:
74
+ x_lengths = t
75
+ ids_str_max = x_lengths - segment_size + 1
76
+ ids_str = torch.rand([b // 2]).to(device=x.device)
77
+ ids_str = (torch.cat([ids_str, ids_str], dim=0) * ids_str_max).to(dtype=torch.long)
78
+ ids_str = torch.max(torch.zeros(ids_str.size()).to(ids_str.device), ids_str).to(
79
+ dtype=torch.long
80
+ )
81
+ ret = slice_segments(x, ids_str, segment_size)
82
+ return ret, ids_str
83
+
84
+
85
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
86
+ position = torch.arange(length, dtype=torch.float)
87
+ num_timescales = channels // 2
88
+ log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
89
+ num_timescales - 1
90
+ )
91
+ inv_timescales = min_timescale * torch.exp(
92
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
93
+ )
94
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
95
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
96
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
97
+ signal = signal.view(1, channels, length)
98
+ return signal
99
+
100
+
101
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
102
+ b, channels, length = x.size()
103
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
104
+ return x + signal.to(dtype=x.dtype, device=x.device)
105
+
106
+
107
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
108
+ b, channels, length = x.size()
109
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
110
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
111
+
112
+
113
+ def subsequent_mask(length):
114
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
115
+ return mask
116
+
117
+
118
+ @torch.jit.script
119
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
120
+ n_channels_int = n_channels[0]
121
+ in_act = input_a + input_b
122
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
123
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
124
+ acts = t_act * s_act
125
+ return acts
126
+
127
+
128
+ def convert_pad_shape(pad_shape):
129
+ l = pad_shape[::-1]
130
+ pad_shape = [item for sublist in l for item in sublist]
131
+ return pad_shape
132
+
133
+
134
+ def shift_1d(x):
135
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
136
+ return x
137
+
138
+
139
+ def sequence_mask(length, max_length=None):
140
+ if max_length is None:
141
+ max_length = length.max()
142
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
143
+ return x.unsqueeze(0) < length.unsqueeze(1)
144
+
145
+
146
+ def generate_path(duration, mask):
147
+ """
148
+ duration: [b, 1, t_x]
149
+ mask: [b, 1, t_y, t_x]
150
+ """
151
+ device = duration.device
152
+
153
+ b, _, t_y, t_x = mask.shape
154
+ cum_duration = torch.cumsum(duration, -1)
155
+
156
+ cum_duration_flat = cum_duration.view(b * t_x)
157
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
158
+ path = path.view(b, t_x, t_y)
159
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
160
+ path = path.unsqueeze(1).transpose(2, 3) * mask
161
+ return path
162
+
163
+
164
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
165
+ if isinstance(parameters, torch.Tensor):
166
+ parameters = [parameters]
167
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
168
+ norm_type = float(norm_type)
169
+ if clip_value is not None:
170
+ clip_value = float(clip_value)
171
+
172
+ total_norm = 0
173
+ for p in parameters:
174
+ param_norm = p.grad.data.norm(norm_type)
175
+ total_norm += param_norm.item() ** norm_type
176
+ if clip_value is not None:
177
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
178
+ total_norm = total_norm ** (1.0 / norm_type)
179
+ return total_norm
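A small worked sketch, not part of this commit, of generate_path above: integer per-token durations are expanded into the 0/1 monotonic alignment used elsewhere in the repo. The 3-token / 6-frame sizes are illustrative.

import torch
from pflow.models.components.commons import generate_path

duration = torch.tensor([[[1, 2, 3]]])                   # (b, 1, t_x); durations sum to 6 frames
x_mask = torch.ones(1, 1, 3)                             # text mask, (b, 1, t_x)
y_mask = torch.ones(1, 1, 6)                             # mel mask,  (b, 1, t_y)
attn_mask = y_mask.unsqueeze(-1) * x_mask.unsqueeze(2)   # (b, 1, t_y, t_x)
path = generate_path(duration, attn_mask)                # (b, 1, t_y, t_x)
# path[0, 0] maps token 0 -> frame 0, token 1 -> frames 1-2, token 2 -> frames 3-5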
pflow/models/components/decoder.py ADDED
@@ -0,0 +1,444 @@
1
+ import math
2
+ from typing import Optional
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from conformer import ConformerBlock
8
+ from diffusers.models.activations import get_activation
9
+ from einops import pack, rearrange, repeat
10
+
11
+ from pflow.models.components.transformer import BasicTransformerBlock
12
+
13
+
14
+ class SinusoidalPosEmb(torch.nn.Module):
15
+ def __init__(self, dim):
16
+ super().__init__()
17
+ self.dim = dim
18
+ assert self.dim % 2 == 0, "SinusoidalPosEmb requires dim to be even"
19
+
20
+ def forward(self, x, scale=1000):
21
+ if x.ndim < 1:
22
+ x = x.unsqueeze(0)
23
+ device = x.device
24
+ half_dim = self.dim // 2
25
+ emb = math.log(10000) / (half_dim - 1)
26
+ emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
27
+ emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
28
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
29
+ return emb
30
+
31
+
32
+ class Block1D(torch.nn.Module):
33
+ def __init__(self, dim, dim_out, groups=8):
34
+ super().__init__()
35
+ self.block = torch.nn.Sequential(
36
+ torch.nn.Conv1d(dim, dim_out, 3, padding=1),
37
+ torch.nn.GroupNorm(groups, dim_out),
38
+ nn.Mish(),
39
+ )
40
+
41
+ def forward(self, x, mask):
42
+ output = self.block(x * mask)
43
+ return output * mask
44
+
45
+
46
+ class ResnetBlock1D(torch.nn.Module):
47
+ def __init__(self, dim, dim_out, time_emb_dim, groups=8):
48
+ super().__init__()
49
+ self.mlp = torch.nn.Sequential(nn.Mish(), torch.nn.Linear(time_emb_dim, dim_out))
50
+
51
+ self.block1 = Block1D(dim, dim_out, groups=groups)
52
+ self.block2 = Block1D(dim_out, dim_out, groups=groups)
53
+
54
+ self.res_conv = torch.nn.Conv1d(dim, dim_out, 1)
55
+
56
+ def forward(self, x, mask, time_emb):
57
+ h = self.block1(x, mask)
58
+ h += self.mlp(time_emb).unsqueeze(-1)
59
+ h = self.block2(h, mask)
60
+ output = h + self.res_conv(x * mask)
61
+ return output
62
+
63
+
64
+ class Downsample1D(nn.Module):
65
+ def __init__(self, dim):
66
+ super().__init__()
67
+ self.conv = torch.nn.Conv1d(dim, dim, 3, 2, 1)
68
+
69
+ def forward(self, x):
70
+ return self.conv(x)
71
+
72
+
73
+ class TimestepEmbedding(nn.Module):
74
+ def __init__(
75
+ self,
76
+ in_channels: int,
77
+ time_embed_dim: int,
78
+ act_fn: str = "silu",
79
+ out_dim: int = None,
80
+ post_act_fn: Optional[str] = None,
81
+ cond_proj_dim=None,
82
+ ):
83
+ super().__init__()
84
+
85
+ self.linear_1 = nn.Linear(in_channels, time_embed_dim)
86
+
87
+ if cond_proj_dim is not None:
88
+ self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
89
+ else:
90
+ self.cond_proj = None
91
+
92
+ self.act = get_activation(act_fn)
93
+
94
+ if out_dim is not None:
95
+ time_embed_dim_out = out_dim
96
+ else:
97
+ time_embed_dim_out = time_embed_dim
98
+ self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)
99
+
100
+ if post_act_fn is None:
101
+ self.post_act = None
102
+ else:
103
+ self.post_act = get_activation(post_act_fn)
104
+
105
+ def forward(self, sample, condition=None):
106
+ if condition is not None:
107
+ sample = sample + self.cond_proj(condition)
108
+ sample = self.linear_1(sample)
109
+
110
+ if self.act is not None:
111
+ sample = self.act(sample)
112
+
113
+ sample = self.linear_2(sample)
114
+
115
+ if self.post_act is not None:
116
+ sample = self.post_act(sample)
117
+ return sample
118
+
119
+
120
+ class Upsample1D(nn.Module):
121
+ """A 1D upsampling layer with an optional convolution.
122
+
123
+ Parameters:
124
+ channels (`int`):
125
+ number of channels in the inputs and outputs.
126
+ use_conv (`bool`, default `False`):
127
+ option to use a convolution.
128
+ use_conv_transpose (`bool`, default `False`):
129
+ option to use a convolution transpose.
130
+ out_channels (`int`, optional):
131
+ number of output channels. Defaults to `channels`.
132
+ """
133
+
134
+ def __init__(self, channels, use_conv=False, use_conv_transpose=True, out_channels=None, name="conv"):
135
+ super().__init__()
136
+ self.channels = channels
137
+ self.out_channels = out_channels or channels
138
+ self.use_conv = use_conv
139
+ self.use_conv_transpose = use_conv_transpose
140
+ self.name = name
141
+
142
+ self.conv = None
143
+ if use_conv_transpose:
144
+ self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1)
145
+ elif use_conv:
146
+ self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1)
147
+
148
+ def forward(self, inputs):
149
+ assert inputs.shape[1] == self.channels
150
+ if self.use_conv_transpose:
151
+ return self.conv(inputs)
152
+
153
+ outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest")
154
+
155
+ if self.use_conv:
156
+ outputs = self.conv(outputs)
157
+
158
+ return outputs
159
+
160
+
161
+ class ConformerWrapper(ConformerBlock):
162
+ def __init__( # pylint: disable=useless-super-delegation
163
+ self,
164
+ *,
165
+ dim,
166
+ dim_head=64,
167
+ heads=8,
168
+ ff_mult=4,
169
+ conv_expansion_factor=2,
170
+ conv_kernel_size=31,
171
+ attn_dropout=0,
172
+ ff_dropout=0,
173
+ conv_dropout=0,
174
+ conv_causal=False,
175
+ ):
176
+ super().__init__(
177
+ dim=dim,
178
+ dim_head=dim_head,
179
+ heads=heads,
180
+ ff_mult=ff_mult,
181
+ conv_expansion_factor=conv_expansion_factor,
182
+ conv_kernel_size=conv_kernel_size,
183
+ attn_dropout=attn_dropout,
184
+ ff_dropout=ff_dropout,
185
+ conv_dropout=conv_dropout,
186
+ conv_causal=conv_causal,
187
+ )
188
+
189
+ def forward(
190
+ self,
191
+ hidden_states,
192
+ attention_mask,
193
+ encoder_hidden_states=None,
194
+ encoder_attention_mask=None,
195
+ timestep=None,
196
+ ):
197
+ return super().forward(x=hidden_states, mask=attention_mask.bool())
198
+
199
+ class Decoder(nn.Module):
200
+ def __init__(
201
+ self,
202
+ in_channels,
203
+ out_channels,
204
+ channels=(256, 256),
205
+ dropout=0.05,
206
+ attention_head_dim=64,
207
+ n_blocks=1,
208
+ num_mid_blocks=2,
209
+ num_heads=4,
210
+ act_fn="snake",
211
+ down_block_type="transformer",
212
+ mid_block_type="transformer",
213
+ up_block_type="transformer",
214
+ ):
215
+ super().__init__()
216
+ channels = tuple(channels)
217
+ self.in_channels = in_channels
218
+ self.out_channels = out_channels
219
+
220
+ self.time_embeddings = SinusoidalPosEmb(in_channels)
221
+ time_embed_dim = channels[0] * 4
222
+ self.time_mlp = TimestepEmbedding(
223
+ in_channels=in_channels,
224
+ time_embed_dim=time_embed_dim,
225
+ act_fn="silu",
226
+ )
227
+ self.down_blocks = nn.ModuleList([])
228
+ self.mid_blocks = nn.ModuleList([])
229
+ self.up_blocks = nn.ModuleList([])
230
+
231
+ output_channel = in_channels
232
+ for i in range(len(channels)): # pylint: disable=consider-using-enumerate
233
+ input_channel = output_channel
234
+ output_channel = channels[i]
235
+ is_last = i == len(channels) - 1
236
+ resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
237
+ transformer_blocks = nn.ModuleList(
238
+ [
239
+ self.get_block(
240
+ down_block_type,
241
+ output_channel,
242
+ attention_head_dim,
243
+ num_heads,
244
+ dropout,
245
+ act_fn,
246
+ )
247
+ for _ in range(n_blocks)
248
+ ]
249
+ )
250
+ downsample = (
251
+ Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1)
252
+ )
253
+
254
+ self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))
255
+
256
+ for i in range(num_mid_blocks):
257
+ input_channel = channels[-1]
258
+ out_channels = channels[-1]
259
+
260
+ resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
261
+
262
+ transformer_blocks = nn.ModuleList(
263
+ [
264
+ self.get_block(
265
+ mid_block_type,
266
+ output_channel,
267
+ attention_head_dim,
268
+ num_heads,
269
+ dropout,
270
+ act_fn,
271
+ )
272
+ for _ in range(n_blocks)
273
+ ]
274
+ )
275
+
276
+ self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))
277
+
278
+ channels = channels[::-1] + (channels[0],)
279
+ for i in range(len(channels) - 1):
280
+ input_channel = channels[i]
281
+ output_channel = channels[i + 1]
282
+ is_last = i == len(channels) - 2
283
+
284
+ resnet = ResnetBlock1D(
285
+ dim=2 * input_channel,
286
+ dim_out=output_channel,
287
+ time_emb_dim=time_embed_dim,
288
+ )
289
+ transformer_blocks = nn.ModuleList(
290
+ [
291
+ self.get_block(
292
+ up_block_type,
293
+ output_channel,
294
+ attention_head_dim,
295
+ num_heads,
296
+ dropout,
297
+ act_fn,
298
+ )
299
+ for _ in range(n_blocks)
300
+ ]
301
+ )
302
+ upsample = (
303
+ Upsample1D(output_channel, use_conv_transpose=True)
304
+ if not is_last
305
+ else nn.Conv1d(output_channel, output_channel, 3, padding=1)
306
+ )
307
+
308
+ self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
309
+
310
+ self.final_block = Block1D(channels[-1], channels[-1])
311
+ self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
312
+
313
+ self.initialize_weights()
314
+ # nn.init.normal_(self.final_proj.weight)
315
+
316
+
317
+
318
+ @staticmethod
319
+ def get_block(block_type, dim, attention_head_dim, num_heads, dropout, act_fn):
320
+ if block_type == "conformer":
321
+ block = ConformerWrapper(
322
+ dim=dim,
323
+ dim_head=attention_head_dim,
324
+ heads=num_heads,
325
+ ff_mult=1,
326
+ conv_expansion_factor=2,
327
+ ff_dropout=dropout,
328
+ attn_dropout=dropout,
329
+ conv_dropout=dropout,
330
+ conv_kernel_size=31,
331
+ )
332
+ elif block_type == "transformer":
333
+ block = BasicTransformerBlock(
334
+ dim=dim,
335
+ num_attention_heads=num_heads,
336
+ attention_head_dim=attention_head_dim,
337
+ dropout=dropout,
338
+ activation_fn=act_fn,
339
+ )
340
+ else:
341
+ raise ValueError(f"Unknown block type {block_type}")
342
+
343
+ return block
344
+
345
+ def initialize_weights(self):
346
+ for m in self.modules():
347
+ if isinstance(m, nn.Conv1d):
348
+ nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
349
+
350
+ if m.bias is not None:
351
+ nn.init.constant_(m.bias, 0)
352
+
353
+ elif isinstance(m, nn.GroupNorm):
354
+ nn.init.constant_(m.weight, 1)
355
+ nn.init.constant_(m.bias, 0)
356
+
357
+ elif isinstance(m, nn.Linear):
358
+ nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
359
+
360
+ if m.bias is not None:
361
+ nn.init.constant_(m.bias, 0)
362
+
363
+ def forward(self, x, mask, mu, t, spks=None, cond=None, training=True):
364
+ """Forward pass of the UNet1DConditional model.
365
+
366
+ Args:
367
+ x (torch.Tensor): shape (batch_size, in_channels, time)
368
+ mask (_type_): shape (batch_size, 1, time)
369
+ t (_type_): shape (batch_size)
370
+ spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
371
+ cond (_type_, optional): placeholder for future use. Defaults to None.
372
+
373
+ Raises:
374
+ ValueError: _description_
375
+ ValueError: _description_
376
+
377
+ Returns:
378
+ _type_: _description_
379
+ """
380
+
381
+ t = self.time_embeddings(t)
382
+ t = self.time_mlp(t)
383
+
384
+ x = pack([x, mu], "b * t")[0]
385
+
386
+ if spks is not None:
387
+ spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
388
+ x = pack([x, spks], "b * t")[0]
389
+
390
+ hiddens = []
391
+ masks = [mask]
392
+ for resnet, transformer_blocks, downsample in self.down_blocks:
393
+ mask_down = masks[-1]
394
+ x = resnet(x, mask_down, t)
395
+ x = rearrange(x, "b c t -> b t c")
396
+ mask_down = rearrange(mask_down, "b 1 t -> b t")
397
+ for transformer_block in transformer_blocks:
398
+ x = transformer_block(
399
+ hidden_states=x,
400
+ attention_mask=mask_down,
401
+ timestep=t,
402
+ )
403
+ x = rearrange(x, "b t c -> b c t")
404
+ mask_down = rearrange(mask_down, "b t -> b 1 t")
405
+ hiddens.append(x) # Save hidden states for skip connections
406
+ x = downsample(x * mask_down)
407
+ masks.append(mask_down[:, :, ::2])
408
+
409
+ masks = masks[:-1]
410
+ mask_mid = masks[-1]
411
+
412
+ for resnet, transformer_blocks in self.mid_blocks:
413
+ x = resnet(x, mask_mid, t)
414
+ x = rearrange(x, "b c t -> b t c")
415
+ mask_mid = rearrange(mask_mid, "b 1 t -> b t")
416
+ for transformer_block in transformer_blocks:
417
+ x = transformer_block(
418
+ hidden_states=x,
419
+ attention_mask=mask_mid,
420
+ timestep=t,
421
+ )
422
+ x = rearrange(x, "b t c -> b c t")
423
+ mask_mid = rearrange(mask_mid, "b t -> b 1 t")
424
+
425
+ for resnet, transformer_blocks, upsample in self.up_blocks:
426
+ mask_up = masks.pop()
427
+ x = resnet(pack([x, hiddens.pop()], "b * t")[0], mask_up, t)
428
+ x = rearrange(x, "b c t -> b t c")
429
+ mask_up = rearrange(mask_up, "b 1 t -> b t")
430
+ for transformer_block in transformer_blocks:
431
+ x = transformer_block(
432
+ hidden_states=x,
433
+ attention_mask=mask_up,
434
+ timestep=t,
435
+ )
436
+ x = rearrange(x, "b t c -> b c t")
437
+ mask_up = rearrange(mask_up, "b t -> b 1 t")
438
+ x = upsample(x * mask_up)
439
+
440
+ x = self.final_block(x, mask_up)
441
+ output = self.final_proj(x * mask_up)
442
+ output = output * mask
443
+
444
+ return output * mask
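A hedged sketch, not part of this commit, of one forward pass through the Decoder above. The hyper-parameters are illustrative rather than the project's configured ones; in_channels is twice the mel dimension because the flow-matching wrapper in the next file packs the noisy sample together with the encoder output mu.

import torch
from pflow.models.components.decoder import Decoder

decoder = Decoder(in_channels=160, out_channels=80, channels=(256, 256),
                  attention_head_dim=64, n_blocks=1, num_mid_blocks=2, num_heads=4)
x = torch.randn(2, 80, 128)        # noisy mel, (batch, n_mels, frames)
mu = torch.randn(2, 80, 128)       # encoder output, same shape
mask = torch.ones(2, 1, 128)
t = torch.rand(2)                  # one flow time per batch item
out = decoder(x, mask, mu, t)      # -> (batch, out_channels, frames)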
pflow/models/components/flow_matching.py ADDED
@@ -0,0 +1,148 @@
1
+ from abc import ABC
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+
6
+ from pflow.models.components.decoder import Decoder
7
+ from pflow.models.components.wn_pflow_decoder import DiffSingerNet
8
+ from pflow.models.components.vits_wn_decoder import VitsWNDecoder
9
+
10
+ from pflow.utils.pylogger import get_pylogger
11
+
12
+ log = get_pylogger(__name__)
13
+
14
+
15
+ class BASECFM(torch.nn.Module, ABC):
16
+ def __init__(
17
+ self,
18
+ n_feats,
19
+ cfm_params,
20
+ n_spks=1,
21
+ spk_emb_dim=128,
22
+ ):
23
+ super().__init__()
24
+ self.n_feats = n_feats
25
+ self.n_spks = n_spks
26
+ self.spk_emb_dim = spk_emb_dim
27
+ self.solver = cfm_params.solver
28
+ if hasattr(cfm_params, "sigma_min"):
29
+ self.sigma_min = cfm_params.sigma_min
30
+ else:
31
+ self.sigma_min = 1e-4
32
+
33
+ self.estimator = None
34
+
35
+ @torch.inference_mode()
36
+ def forward(self, mu, mask, n_timesteps, temperature=1.0, cond=None, training=False, guidance_scale=0.0):
37
+ """Forward diffusion
38
+
39
+ Args:
40
+ mu (torch.Tensor): output of encoder
41
+ shape: (batch_size, n_feats, mel_timesteps)
42
+ mask (torch.Tensor): output_mask
43
+ shape: (batch_size, 1, mel_timesteps)
44
+ n_timesteps (int): number of diffusion steps
45
+ temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
46
+ cond: Not used but kept for future purposes
47
+
48
+ Returns:
49
+ sample: generated mel-spectrogram
50
+ shape: (batch_size, n_feats, mel_timesteps)
51
+ """
52
+ z = torch.randn_like(mu) * temperature
53
+ t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
54
+ return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, cond=cond, training=training, guidance_scale=guidance_scale)
55
+
56
+ def solve_euler(self, x, t_span, mu, mask, cond, training=False, guidance_scale=0.0):
57
+ """
58
+ Fixed euler solver for ODEs.
59
+ Args:
60
+ x (torch.Tensor): random noise
61
+ t_span (torch.Tensor): n_timesteps interpolated
62
+ shape: (n_timesteps + 1,)
63
+ mu (torch.Tensor): output of encoder
64
+ shape: (batch_size, n_feats, mel_timesteps)
65
+ mask (torch.Tensor): output_mask
66
+ shape: (batch_size, 1, mel_timesteps)
67
+ cond: Not used but kept for future purposes
68
+ """
69
+ t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
70
+
71
+ # I am storing this because I can later plot it by putting a debugger here and saving it to a file
72
+ # Or in future might add like a return_all_steps flag
73
+ sol = []
74
+ steps = 1
75
+ while steps <= len(t_span) - 1:
76
+ dphi_dt = self.estimator(x, mask, mu, t, cond, training=training)
77
+ if guidance_scale > 0.0:
78
+ mu_avg = mu.mean(2, keepdims=True).expand_as(mu)
79
+ dphi_avg = self.estimator(x, mask, mu_avg, t, cond, training=training)
80
+ dphi_dt = dphi_dt + guidance_scale * (dphi_dt - dphi_avg)
81
+
82
+ x = x + dt * dphi_dt
83
+ t = t + dt
84
+ sol.append(x)
85
+ if steps < len(t_span) - 1:
86
+ dt = t_span[steps + 1] - t
87
+ steps += 1
88
+
89
+ return sol[-1]
90
+
91
+ def compute_loss(self, x1, mask, mu, cond=None, training=True, loss_mask=None):
92
+ """Computes diffusion loss
93
+
94
+ Args:
95
+ x1 (torch.Tensor): Target
96
+ shape: (batch_size, n_feats, mel_timesteps)
97
+ mask (torch.Tensor): target mask
98
+ shape: (batch_size, 1, mel_timesteps)
99
+ mu (torch.Tensor): output of encoder
100
+ shape: (batch_size, n_feats, mel_timesteps)
101
+ cond: Not used but kept for future purposes
102
+ loss_mask (torch.Tensor, optional): optional mask applied to the loss in place of `mask`. Defaults to None.
103
+
104
+ Returns:
105
+ loss: conditional flow matching loss
106
+ y: conditional flow
107
+ shape: (batch_size, n_feats, mel_timesteps)
108
+ """
109
+ b, _, t = mu.shape
110
+
111
+ # random timestep
112
+ t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
113
+ # sample noise p(x_0)
114
+ z = torch.randn_like(x1)
115
+
116
+ y = (1 - (1 - self.sigma_min) * t) * z + t * x1
117
+ u = x1 - (1 - self.sigma_min) * z
118
+ # y = u * t + z
119
+ estimator_out = self.estimator(y, mask, mu, t.squeeze(), training=training)
120
+
121
+ if loss_mask is not None:
122
+ mask = loss_mask
123
+ loss = F.mse_loss(estimator_out*mask, u*mask, reduction="sum") / (
124
+ torch.sum(mask) * u.shape[1]
125
+ )
126
+ return loss, y
127
+
128
+
129
+ class CFM(BASECFM):
130
+ def __init__(self, in_channels, out_channel, cfm_params, decoder_params):
131
+ super().__init__(
132
+ n_feats=in_channels,
133
+ cfm_params=cfm_params,
134
+ )
135
+
136
+ # Just change the architecture of the estimator here
137
+ self.estimator = Decoder(in_channels=in_channels*2, out_channels=out_channel, **decoder_params)
138
+ # self.estimator = DiffSingerNet(in_dims=in_channels, encoder_hidden=out_channel)
139
+ # self.estimator = VitsWNDecoder(
140
+ # in_channels=in_channels,
141
+ # out_channels=out_channel,
142
+ # hidden_channels=out_channel,
143
+ # kernel_size=3,
144
+ # dilation_rate=1,
145
+ # n_layers=18,
146
+ # gin_channels=out_channel*2
147
+ # )
148
+
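A standalone sketch, not part of this commit, of the conditional flow-matching construction that compute_loss above regresses onto: noise is interpolated toward the target along a straight path and the estimator is trained to predict the constant velocity of that path. Shapes are illustrative.

import torch

sigma_min = 1e-4
x1 = torch.randn(2, 80, 100)                   # target mel, (batch, n_feats, frames)
z = torch.randn_like(x1)                       # noise sample x_0
t = torch.rand(2, 1, 1)                        # per-item flow time in [0, 1)
y = (1 - (1 - sigma_min) * t) * z + t * x1     # point on the conditional path at time t
u = x1 - (1 - sigma_min) * z                   # velocity target dy/dt, constant in t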
pflow/models/components/speech_prompt_encoder.py ADDED
@@ -0,0 +1,636 @@
1
+ """ from https://github.com/jaywalnut310/glow-tts """
2
+
3
+ import math
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from einops import rearrange
8
+
9
+ import pflow.utils as utils
10
+ from pflow.utils.model import sequence_mask
11
+ from pflow.models.components import commons
12
+ from pflow.models.components.vits_posterior import PosteriorEncoder
13
+ from pflow.models.components.transformer import BasicTransformerBlock
14
+
15
+ log = utils.get_pylogger(__name__)
16
+
17
+ class LayerNorm(nn.Module):
18
+ def __init__(self, channels, eps=1e-4):
19
+ super().__init__()
20
+ self.channels = channels
21
+ self.eps = eps
22
+
23
+ self.gamma = torch.nn.Parameter(torch.ones(channels))
24
+ self.beta = torch.nn.Parameter(torch.zeros(channels))
25
+
26
+ def forward(self, x):
27
+ n_dims = len(x.shape)
28
+ mean = torch.mean(x, 1, keepdim=True)
29
+ variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
30
+
31
+ x = (x - mean) * torch.rsqrt(variance + self.eps)
32
+
33
+ shape = [1, -1] + [1] * (n_dims - 2)
34
+ x = x * self.gamma.view(*shape) + self.beta.view(*shape)
35
+ return x
36
+
37
+
38
+ class ConvReluNorm(nn.Module):
39
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
40
+ super().__init__()
41
+ self.in_channels = in_channels
42
+ self.hidden_channels = hidden_channels
43
+ self.out_channels = out_channels
44
+ self.kernel_size = kernel_size
45
+ self.n_layers = n_layers
46
+ self.p_dropout = p_dropout
47
+
48
+ self.conv_layers = torch.nn.ModuleList()
49
+ self.norm_layers = torch.nn.ModuleList()
50
+ self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
51
+ self.norm_layers.append(LayerNorm(hidden_channels))
52
+ self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
53
+ for _ in range(n_layers - 1):
54
+ self.conv_layers.append(
55
+ torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
56
+ )
57
+ self.norm_layers.append(LayerNorm(hidden_channels))
58
+ self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
59
+ self.proj.weight.data.zero_()
60
+ self.proj.bias.data.zero_()
61
+
62
+ def forward(self, x, x_mask):
63
+ x_org = x
64
+ for i in range(self.n_layers):
65
+ x = self.conv_layers[i](x * x_mask)
66
+ x = self.norm_layers[i](x)
67
+ x = self.relu_drop(x)
68
+ x = x_org + self.proj(x)
69
+ return x * x_mask
70
+
71
+
72
+ class DurationPredictor(nn.Module):
73
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
74
+ super().__init__()
75
+ self.in_channels = in_channels
76
+ self.filter_channels = filter_channels
77
+ self.p_dropout = p_dropout
78
+
79
+ self.drop = torch.nn.Dropout(p_dropout)
80
+ self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
81
+ self.norm_1 = LayerNorm(filter_channels)
82
+ self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
83
+ self.norm_2 = LayerNorm(filter_channels)
84
+ self.proj = torch.nn.Conv1d(filter_channels, 1, 1)
85
+
86
+ def forward(self, x, x_mask):
87
+ x = self.conv_1(x * x_mask)
88
+ x = torch.relu(x)
89
+ x = self.norm_1(x)
90
+ x = self.drop(x)
91
+ x = self.conv_2(x * x_mask)
92
+ x = torch.relu(x)
93
+ x = self.norm_2(x)
94
+ x = self.drop(x)
95
+ x = self.proj(x * x_mask)
96
+ # x = torch.relu(x)
97
+ return x * x_mask
98
+
99
+ class DurationPredictorNS2(nn.Module):
100
+ def __init__(
101
+ self, in_channels, filter_channels, kernel_size, p_dropout=0.5
102
+ ):
103
+ super().__init__()
104
+
105
+ self.in_channels = in_channels
106
+ self.filter_channels = filter_channels
107
+ self.kernel_size = kernel_size
108
+ self.p_dropout = p_dropout
109
+
110
+ self.drop = nn.Dropout(p_dropout)
111
+ self.conv_1 = nn.Conv1d(
112
+ in_channels, filter_channels, kernel_size, padding=kernel_size // 2
113
+ )
114
+ self.norm_1 = LayerNorm(filter_channels)
115
+
116
+ self.module_list = nn.ModuleList()
117
+ self.module_list.append(self.conv_1)
118
+ self.module_list.append(nn.ReLU())
119
+ self.module_list.append(self.norm_1)
120
+ self.module_list.append(self.drop)
121
+
122
+ for i in range(12):
123
+ self.module_list.append(nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2))
124
+ self.module_list.append(nn.ReLU())
125
+ self.module_list.append(LayerNorm(filter_channels))
126
+ self.module_list.append(nn.Dropout(p_dropout))
127
+
128
+
129
+ # attention layer every 3 layers
130
+ self.attn_list = nn.ModuleList()
131
+ for i in range(4):
132
+ self.attn_list.append(
133
+ Encoder(
134
+ filter_channels,
135
+ filter_channels,
136
+ 8,
137
+ 10,
138
+ 3,
139
+ p_dropout=p_dropout,
140
+ )
141
+ )
142
+
143
+ for i in range(12):  # walk the conv blocks above; the original range(30) would overrun attn_list
144
+ if (i + 1) % 3 == 0:  # parenthesized: without the parentheses this condition was never true
145
+ self.module_list.append(self.attn_list[i//3])
146
+
147
+ self.proj = nn.Conv1d(filter_channels, 1, 1)
148
+
149
+ def forward(self, x, x_mask):
150
+ x = torch.detach(x)
151
+ for layer in self.module_list:
152
+ x = layer(x * x_mask, x_mask) if isinstance(layer, Encoder) else layer(x * x_mask)  # Encoder blocks also expect the mask
153
+ x = self.proj(x * x_mask)
154
+ # x = torch.relu(x)
155
+ return x * x_mask
156
+
157
+ class RotaryPositionalEmbeddings(nn.Module):
158
+ """
159
+ ## RoPE module
160
+
161
+ Rotary encoding transforms pairs of features by rotating in the 2D plane.
162
+ That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
163
+ Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
164
+ by an angle depending on the position of the token.
165
+ """
166
+
167
+ def __init__(self, d: int, base: int = 10_000):
168
+ r"""
169
+ * `d` is the number of features $d$
170
+ * `base` is the constant used for calculating $\Theta$
171
+ """
172
+ super().__init__()
173
+
174
+ self.base = base
175
+ self.d = int(d)
176
+ self.cos_cached = None
177
+ self.sin_cached = None
178
+
179
+ def _build_cache(self, x: torch.Tensor):
180
+ r"""
181
+ Cache $\cos$ and $\sin$ values
182
+ """
183
+ # Return if cache is already built
184
+ if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
185
+ return
186
+
187
+ # Get sequence length
188
+ seq_len = x.shape[0]
189
+
190
+ # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
191
+ theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)
192
+
193
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
194
+ seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)
195
+
196
+ # Calculate the product of position index and $\theta_i$
197
+ idx_theta = torch.einsum("n,d->nd", seq_idx, theta)
198
+
199
+ # Concatenate so that for row $m$ we have
200
+ # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
201
+ idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)
202
+
203
+ # Cache them
204
+ self.cos_cached = idx_theta2.cos()[:, None, None, :]
205
+ self.sin_cached = idx_theta2.sin()[:, None, None, :]
206
+
207
+ def _neg_half(self, x: torch.Tensor):
208
+ # $\frac{d}{2}$
209
+ d_2 = self.d // 2
210
+
211
+ # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
212
+ return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)
213
+
214
+ def forward(self, x: torch.Tensor):
215
+ """
216
+ * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
217
+ """
218
+ # Cache $\cos$ and $\sin$ values
219
+ x = rearrange(x, "b h t d -> t b h d")
220
+
221
+ self._build_cache(x)
222
+
223
+ # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
224
+ x_rope, x_pass = x[..., : self.d], x[..., self.d :]
225
+
226
+ # Calculate
227
+ # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
228
+ neg_half_x = self._neg_half(x_rope)
229
+
230
+ x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])
231
+
232
+ return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")
233
+
234
+
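# ---------------------------------------------------------------------------
# Editor's sketch (not part of the commit): the module expects
# (batch, heads, time, head_dim) and rotates only the first `d` features of
# each head; `d = 8` below mirrors the `k_channels * 0.5` usage in
# MultiHeadAttention further down.
def _rope_example():
    import torch

    rope = RotaryPositionalEmbeddings(d=8)
    q = torch.randn(2, 4, 10, 16)                 # (batch, heads, time, head_dim)
    q_rot = rope(q)                               # same shape, position-dependent rotation applied
    print(q_rot.shape)                            # torch.Size([2, 4, 10, 16])
# ---------------------------------------------------------------------------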
235
+ class MultiHeadAttention(nn.Module):
236
+ def __init__(
237
+ self,
238
+ channels,
239
+ out_channels,
240
+ n_heads,
241
+ heads_share=True,
242
+ p_dropout=0.0,
243
+ proximal_bias=False,
244
+ proximal_init=False,
245
+ ):
246
+ super().__init__()
247
+ assert channels % n_heads == 0
248
+
249
+ self.channels = channels
250
+ self.out_channels = out_channels
251
+ self.n_heads = n_heads
252
+ self.heads_share = heads_share
253
+ self.proximal_bias = proximal_bias
254
+ self.p_dropout = p_dropout
255
+ self.attn = None
256
+
257
+ self.k_channels = channels // n_heads
258
+ self.conv_q = torch.nn.Conv1d(channels, channels, 1)
259
+ self.conv_k = torch.nn.Conv1d(channels, channels, 1)
260
+ self.conv_v = torch.nn.Conv1d(channels, channels, 1)
261
+
262
+ # from https://nn.labml.ai/transformers/rope/index.html
263
+ self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
264
+ self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
265
+
266
+ self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
267
+ self.drop = torch.nn.Dropout(p_dropout)
268
+
269
+ torch.nn.init.xavier_uniform_(self.conv_q.weight)
270
+ torch.nn.init.xavier_uniform_(self.conv_k.weight)
271
+ if proximal_init:
272
+ self.conv_k.weight.data.copy_(self.conv_q.weight.data)
273
+ self.conv_k.bias.data.copy_(self.conv_q.bias.data)
274
+ torch.nn.init.xavier_uniform_(self.conv_v.weight)
275
+
276
+ def forward(self, x, c, attn_mask=None):
277
+ q = self.conv_q(x)
278
+ k = self.conv_k(c)
279
+ v = self.conv_v(c)
280
+
281
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
282
+
283
+ x = self.conv_o(x)
284
+ return x
285
+
286
+ def attention(self, query, key, value, mask=None):
287
+ b, d, t_s, t_t = (*key.size(), query.size(2))
288
+ query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
289
+ key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
290
+ value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)
291
+
292
+ query = self.query_rotary_pe(query)
293
+ key = self.key_rotary_pe(key)
294
+
295
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
296
+
297
+ if self.proximal_bias:
298
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
299
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
300
+ if mask is not None:
301
+ scores = scores.masked_fill(mask == 0, -1e4)
302
+ p_attn = torch.nn.functional.softmax(scores, dim=-1)
303
+ p_attn = self.drop(p_attn)
304
+ output = torch.matmul(p_attn, value)
305
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t)
306
+ return output, p_attn
307
+
308
+ @staticmethod
309
+ def _attention_bias_proximal(length):
310
+ r = torch.arange(length, dtype=torch.float32)
311
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
312
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
313
+
314
+
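# ---------------------------------------------------------------------------
# Editor's sketch (not part of the commit): attention runs from a query
# sequence x to a context sequence c, both (batch, channels, time); the
# optional mask broadcasts against (batch, heads, query_time, context_time).
def _cross_attention_example():
    import torch

    mha = MultiHeadAttention(channels=192, out_channels=192, n_heads=2)
    x = torch.randn(2, 192, 40)                   # queries, e.g. text states
    c = torch.randn(2, 192, 60)                   # keys/values, e.g. a speech prompt
    attn_mask = torch.ones(2, 1, 40, 60)          # 1 = attend, 0 = masked out
    out = mha(x, c, attn_mask=attn_mask)
    print(out.shape)                              # torch.Size([2, 192, 40])
# ---------------------------------------------------------------------------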
315
+ class FFN(nn.Module):
316
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
317
+ super().__init__()
318
+ self.in_channels = in_channels
319
+ self.out_channels = out_channels
320
+ self.filter_channels = filter_channels
321
+ self.kernel_size = kernel_size
322
+ self.p_dropout = p_dropout
323
+
324
+ self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
325
+ self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2)
326
+ self.drop = torch.nn.Dropout(p_dropout)
327
+
328
+ def forward(self, x, x_mask):
329
+ x = self.conv_1(x * x_mask)
330
+ x = torch.relu(x)
331
+ x = self.drop(x)
332
+ x = self.conv_2(x * x_mask)
333
+ return x * x_mask
334
+
335
+
336
+ class Encoder(nn.Module):
337
+ def __init__(
338
+ self,
339
+ hidden_channels,
340
+ filter_channels,
341
+ n_heads,
342
+ n_layers,
343
+ kernel_size=1,
344
+ p_dropout=0.0,
345
+ **kwargs,
346
+ ):
347
+ super().__init__()
348
+ self.hidden_channels = hidden_channels
349
+ self.filter_channels = filter_channels
350
+ self.n_heads = n_heads
351
+ self.n_layers = n_layers
352
+ self.kernel_size = kernel_size
353
+ self.p_dropout = p_dropout
354
+
355
+ self.drop = torch.nn.Dropout(p_dropout)
356
+ self.attn_layers = torch.nn.ModuleList()
357
+ self.norm_layers_1 = torch.nn.ModuleList()
358
+ self.ffn_layers = torch.nn.ModuleList()
359
+ self.norm_layers_2 = torch.nn.ModuleList()
360
+ for _ in range(self.n_layers):
361
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
362
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
363
+ self.ffn_layers.append(
364
+ FFN(
365
+ hidden_channels,
366
+ hidden_channels,
367
+ filter_channels,
368
+ kernel_size,
369
+ p_dropout=p_dropout,
370
+ )
371
+ )
372
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
373
+
374
+ def forward(self, x, x_mask):
375
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
376
+ for i in range(self.n_layers):
377
+ x = x * x_mask
378
+ y = self.attn_layers[i](x, x, attn_mask)
379
+ y = self.drop(y)
380
+ x = self.norm_layers_1[i](x + y)
381
+ y = self.ffn_layers[i](x, x_mask)
382
+ y = self.drop(y)
383
+ x = self.norm_layers_2[i](x + y)
384
+ x = x * x_mask
385
+ return x
386
+
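# ---------------------------------------------------------------------------
# Editor's sketch (not part of the commit): the Encoder is a post-norm
# transformer stack over masked (batch, channels, time) input; the mask is
# built with the sequence_mask helper imported at the top of this module.
def _encoder_example():
    import torch

    enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=4,
                  kernel_size=3, p_dropout=0.1)
    x = torch.randn(3, 192, 25)
    lengths = torch.tensor([25, 20, 10])
    x_mask = torch.unsqueeze(sequence_mask(lengths, 25), 1).to(x.dtype)   # (batch, 1, time)
    y = enc(x, x_mask)                            # (batch, channels, time); padded frames are zeroed
    print(y.shape)
# ---------------------------------------------------------------------------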
387
+ class Decoder(nn.Module):
388
+ def __init__(
389
+ self,
390
+ hidden_channels,
391
+ filter_channels,
392
+ n_heads,
393
+ n_layers,
394
+ kernel_size=1,
395
+ p_dropout=0.0,
396
+ proximal_bias=False,
397
+ proximal_init=True,
398
+ **kwargs
399
+ ):
400
+ super().__init__()
401
+ self.hidden_channels = hidden_channels
402
+ self.filter_channels = filter_channels
403
+ self.n_heads = n_heads
404
+ self.n_layers = n_layers
405
+ self.kernel_size = kernel_size
406
+ self.p_dropout = p_dropout
407
+ self.proximal_bias = proximal_bias
408
+ self.proximal_init = proximal_init
409
+
410
+ self.drop = nn.Dropout(p_dropout)
411
+ self.self_attn_layers = nn.ModuleList()
412
+ self.norm_layers_0 = nn.ModuleList()
413
+ self.encdec_attn_layers = nn.ModuleList()
414
+ self.norm_layers_1 = nn.ModuleList()
415
+ self.ffn_layers = nn.ModuleList()
416
+ self.norm_layers_2 = nn.ModuleList()
417
+ for i in range(self.n_layers):
418
+ self.self_attn_layers.append(
419
+ MultiHeadAttention(
420
+ hidden_channels,
421
+ hidden_channels,
422
+ n_heads,
423
+ p_dropout=p_dropout
424
+ )
425
+ )
426
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
427
+ self.encdec_attn_layers.append(
428
+ MultiHeadAttention(
429
+ hidden_channels,
430
+ hidden_channels,
431
+ n_heads,
432
+ p_dropout=p_dropout
433
+ )
434
+ )
435
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
436
+ self.ffn_layers.append(
437
+ FFN(
438
+ hidden_channels,
439
+ hidden_channels,
440
+ filter_channels,
441
+ kernel_size,
442
+ p_dropout=p_dropout,
443
+ )
444
+ )
445
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
446
+
447
+ def forward(self, x, x_mask, h, h_mask):
448
+ """
449
+ x: decoder input
450
+ h: encoder output
451
+ """
452
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
453
+ device=x.device, dtype=x.dtype
454
+ )
455
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
456
+ x = x * x_mask
457
+ for i in range(self.n_layers):
458
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
459
+ y = self.drop(y)
460
+ x = self.norm_layers_0[i](x + y)
461
+
462
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
463
+ y = self.drop(y)
464
+ x = self.norm_layers_1[i](x + y)
465
+
466
+ y = self.ffn_layers[i](x, x_mask)
467
+ y = self.drop(y)
468
+ x = self.norm_layers_2[i](x + y)
469
+ x = x * x_mask
470
+ return x
471
+
472
+ class TextEncoder(nn.Module):
473
+ def __init__(
474
+ self,
475
+ encoder_type,
476
+ encoder_params,
477
+ duration_predictor_params,
478
+ n_vocab,
479
+ speech_in_channels,
480
+ ):
481
+ super().__init__()
482
+ self.encoder_type = encoder_type
483
+ self.n_vocab = n_vocab
484
+ self.n_feats = encoder_params.n_feats
485
+ self.n_channels = encoder_params.n_channels
486
+
487
+ self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
488
+ torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)
489
+
490
+ self.speech_in_channels = speech_in_channels
491
+ self.speech_out_channels = self.n_channels
492
+ self.speech_prompt_proj = torch.nn.Conv1d(self.speech_in_channels, self.speech_out_channels, 1)
493
+ # self.speech_prompt_proj = PosteriorEncoder(
494
+ # self.speech_in_channels,
495
+ # self.speech_out_channels,
496
+ # self.speech_out_channels,
497
+ # 1,
498
+ # 1,
499
+ # 1,
500
+ # gin_channels=0,
501
+ # )
502
+
503
+ self.prenet = ConvReluNorm(
504
+ self.n_channels,
505
+ self.n_channels,
506
+ self.n_channels,
507
+ kernel_size=5,
508
+ n_layers=3,
509
+ p_dropout=0,
510
+ )
511
+
512
+ self.speech_prompt_encoder = Encoder(
513
+ encoder_params.n_channels,
514
+ encoder_params.filter_channels,
515
+ encoder_params.n_heads,
516
+ encoder_params.n_layers,
517
+ encoder_params.kernel_size,
518
+ encoder_params.p_dropout,
519
+ )
520
+
521
+ self.text_base_encoder = Encoder(
522
+ encoder_params.n_channels,
523
+ encoder_params.filter_channels,
524
+ encoder_params.n_heads,
525
+ encoder_params.n_layers,
526
+ encoder_params.kernel_size,
527
+ encoder_params.p_dropout,
528
+ )
529
+
530
+ self.decoder = Decoder(
531
+ encoder_params.n_channels,
532
+ encoder_params.filter_channels,
533
+ encoder_params.n_heads,
534
+ encoder_params.n_layers,
535
+ encoder_params.kernel_size,
536
+ encoder_params.p_dropout,
537
+ )
538
+
539
+ self.transformerblock = BasicTransformerBlock(
540
+ encoder_params.n_channels,
541
+ encoder_params.n_heads,
542
+ encoder_params.n_channels // encoder_params.n_heads,
543
+ encoder_params.p_dropout,
544
+ encoder_params.n_channels,
545
+ activation_fn="gelu",
546
+ attention_bias=False,
547
+ only_cross_attention=False,
548
+ double_self_attention=False,
549
+ upcast_attention=False,
550
+ norm_elementwise_affine=True,
551
+ norm_type="layer_norm",
552
+ final_dropout=False,
553
+ )
554
+ self.proj_m = torch.nn.Conv1d(self.n_channels, self.n_feats, 1)
555
+
556
+ self.proj_w = DurationPredictor(
557
+ self.n_channels,
558
+ duration_predictor_params.filter_channels_dp,
559
+ duration_predictor_params.kernel_size,
560
+ duration_predictor_params.p_dropout,
561
+ )
562
+ # self.proj_w = DurationPredictorNS2(
563
+ # self.n_channels,
564
+ # duration_predictor_params.filter_channels_dp,
565
+ # duration_predictor_params.kernel_size,
566
+ # duration_predictor_params.p_dropout,
567
+ # )
568
+
569
+ # self.speech_prompt_pos_emb = RotaryPositionalEmbeddings(self.n_channels * 0.5)
570
+ # self.text_pos_emb = RotaryPositionalEmbeddings(self.n_channels * 0.5)
571
+
572
+ def forward(
573
+ self,
574
+ x_input,
575
+ x_lengths,
576
+ speech_prompt,
577
+ ):
578
+ """Run forward pass to the transformer based encoder and duration predictor
579
+
580
+ Args:
581
+ x_input (torch.Tensor): text input
582
+ shape: (batch_size, max_text_length)
583
+ x_lengths (torch.Tensor): text input lengths
584
+ shape: (batch_size,)
585
+ speech_prompt (torch.Tensor): speech prompt input
586
+
587
+ Returns:
588
+ mu (torch.Tensor): average output of the encoder
589
+ shape: (batch_size, n_feats, max_text_length)
590
+ logw (torch.Tensor): log duration predicted by the duration predictor
591
+ shape: (batch_size, 1, max_text_length)
592
+ x_mask (torch.Tensor): mask for the text input
593
+ shape: (batch_size, 1, max_text_length)
594
+ """
595
+
596
+ x_emb = self.emb(x_input) * math.sqrt(self.n_channels)
597
+ x_emb = torch.transpose(x_emb, 1, -1)
598
+ x_emb_mask = torch.unsqueeze(sequence_mask(x_lengths, x_emb.size(2)), 1).to(x_emb.dtype)
599
+ x_emb = self.text_base_encoder(x_emb, x_emb_mask)
600
+
601
+ x_speech_lengths = x_lengths + speech_prompt.size(2)
602
+ speech_lengths = x_speech_lengths - x_lengths
603
+ speech_mask = torch.unsqueeze(sequence_mask(speech_lengths, speech_prompt.size(2)), 1).to(x_emb.dtype)
604
+
605
+ speech_prompt_proj = self.speech_prompt_proj(speech_prompt)
606
+ # speech_prompt_proj, speech_mask = self.speech_prompt_proj(speech_prompt, speech_lengths)
607
+ # speech_prompt_proj = self.speech_prompt_encoder(speech_prompt_proj, speech_mask)
608
+
609
+ x_speech_cat = torch.cat([speech_prompt_proj, x_emb], dim=2)
610
+ x_speech_mask = torch.unsqueeze(sequence_mask(x_speech_lengths, x_speech_cat.size(2)), 1).to(x_speech_cat.dtype)
611
+
612
+ x_prenet = self.prenet(x_speech_cat, x_speech_mask)
613
+ # split speech prompt and text input
614
+ speech_prompt_proj = x_prenet[:, :, :speech_prompt_proj.size(2)]
615
+ x_split = x_prenet[:, :, speech_prompt_proj.size(2):]
616
+
617
+ # add positional encoding to speech prompt and x_split
618
+ # x_split = self.text_pos_emb(x_split.unsqueeze(1).transpose(-2,-1)).squeeze(1).transpose(-2,-1)
619
+ x_split_mask = torch.unsqueeze(sequence_mask(x_lengths, x_split.size(2)), 1).to(x_split.dtype)
620
+
621
+ # speech_prompt = self.speech_prompt_pos_emb(speech_prompt_proj.unsqueeze(1).transpose(-2,-1)).squeeze(1).transpose(-2,-1)
622
+ # x_split = self.decoder(x_split, x_split_mask, speech_prompt, speech_mask)
623
+
624
+ x_split = self.transformerblock(x_split.transpose(1,2), x_split_mask, speech_prompt_proj.transpose(1,2), speech_mask)
625
+ x_split = x_split.transpose(1,2)
626
+
627
+ # x_split_mask = torch.unsqueeze(sequence_mask(x_lengths, x_split.size(2)), 1).to(x.dtype)
628
+
629
+ # x_split = x_split + x_emb
630
+
631
+ mu = self.proj_m(x_split) * x_split_mask
632
+
633
+ x_dp = torch.detach(x_split)
634
+ logw = self.proj_w(x_dp, x_split_mask)
635
+
636
+ return mu, logw, x_split_mask
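# ---------------------------------------------------------------------------
# Editor's sketch (not part of the commit): rough shape-level usage of this
# speech-prompted TextEncoder. All hyper-parameter values and the
# `encoder_type` string below are illustrative placeholders, not the
# project's actual configuration.
def _speech_prompted_text_encoder_example():
    from types import SimpleNamespace

    import torch

    enc_params = SimpleNamespace(n_feats=80, n_channels=192, filter_channels=768,
                                 n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1)
    dp_params = SimpleNamespace(filter_channels_dp=256, kernel_size=3, p_dropout=0.1)
    encoder = TextEncoder("encoder", enc_params, dp_params, n_vocab=178, speech_in_channels=80)

    tokens = torch.randint(0, 178, (2, 50))       # (batch, max_text_length)
    token_lengths = torch.tensor([50, 42])
    prompt_mel = torch.randn(2, 80, 120)          # mel prompt: (batch, n_feats, prompt_frames)
    mu, logw, x_mask = encoder(tokens, token_lengths, prompt_mel)
    print(mu.shape, logw.shape, x_mask.shape)     # (2, 80, 50) (2, 1, 50) (2, 1, 50)
# ---------------------------------------------------------------------------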
pflow/models/components/speech_prompt_encoder_v0.py ADDED
@@ -0,0 +1,618 @@
1
+ """ from https://github.com/jaywalnut310/glow-tts """
2
+
3
+ import math
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from einops import rearrange
8
+
9
+ import pflow.utils as utils
10
+ from pflow.utils.model import sequence_mask
11
+ from pflow.models.components import commons
12
+ from pflow.models.components.vits_posterior import PosteriorEncoder
13
+ from pflow.models.components.transformer import BasicTransformerBlock
14
+
15
+ log = utils.get_pylogger(__name__)
16
+
17
+ class LayerNorm(nn.Module):
18
+ def __init__(self, channels, eps=1e-4):
19
+ super().__init__()
20
+ self.channels = channels
21
+ self.eps = eps
22
+
23
+ self.gamma = torch.nn.Parameter(torch.ones(channels))
24
+ self.beta = torch.nn.Parameter(torch.zeros(channels))
25
+
26
+ def forward(self, x):
27
+ n_dims = len(x.shape)
28
+ mean = torch.mean(x, 1, keepdim=True)
29
+ variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
30
+
31
+ x = (x - mean) * torch.rsqrt(variance + self.eps)
32
+
33
+ shape = [1, -1] + [1] * (n_dims - 2)
34
+ x = x * self.gamma.view(*shape) + self.beta.view(*shape)
35
+ return x
36
+
37
+
38
+ class ConvReluNorm(nn.Module):
39
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
40
+ super().__init__()
41
+ self.in_channels = in_channels
42
+ self.hidden_channels = hidden_channels
43
+ self.out_channels = out_channels
44
+ self.kernel_size = kernel_size
45
+ self.n_layers = n_layers
46
+ self.p_dropout = p_dropout
47
+
48
+ self.conv_layers = torch.nn.ModuleList()
49
+ self.norm_layers = torch.nn.ModuleList()
50
+ self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
51
+ self.norm_layers.append(LayerNorm(hidden_channels))
52
+ self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
53
+ for _ in range(n_layers - 1):
54
+ self.conv_layers.append(
55
+ torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
56
+ )
57
+ self.norm_layers.append(LayerNorm(hidden_channels))
58
+ self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
59
+ self.proj.weight.data.zero_()
60
+ self.proj.bias.data.zero_()
61
+
62
+ def forward(self, x, x_mask):
63
+ x_org = x
64
+ for i in range(self.n_layers):
65
+ x = self.conv_layers[i](x * x_mask)
66
+ x = self.norm_layers[i](x)
67
+ x = self.relu_drop(x)
68
+ x = x_org + self.proj(x)
69
+ return x * x_mask
70
+
71
+
72
+ class DurationPredictor(nn.Module):
73
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
74
+ super().__init__()
75
+ self.in_channels = in_channels
76
+ self.filter_channels = filter_channels
77
+ self.p_dropout = p_dropout
78
+
79
+ self.drop = torch.nn.Dropout(p_dropout)
80
+ self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
81
+ self.norm_1 = LayerNorm(filter_channels)
82
+ self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
83
+ self.norm_2 = LayerNorm(filter_channels)
84
+ self.proj = torch.nn.Conv1d(filter_channels, 1, 1)
85
+
86
+ def forward(self, x, x_mask):
87
+ x = self.conv_1(x * x_mask)
88
+ x = torch.relu(x)
89
+ x = self.norm_1(x)
90
+ x = self.drop(x)
91
+ x = self.conv_2(x * x_mask)
92
+ x = torch.relu(x)
93
+ x = self.norm_2(x)
94
+ x = self.drop(x)
95
+ x = self.proj(x * x_mask)
96
+ # x = torch.relu(x)
97
+ return x * x_mask
98
+
99
+ class DurationPredictorNS2(nn.Module):
100
+ def __init__(
101
+ self, in_channels, filter_channels, kernel_size, p_dropout=0.5
102
+ ):
103
+ super().__init__()
104
+
105
+ self.in_channels = in_channels
106
+ self.filter_channels = filter_channels
107
+ self.kernel_size = kernel_size
108
+ self.p_dropout = p_dropout
109
+
110
+ self.drop = nn.Dropout(p_dropout)
111
+ self.conv_1 = nn.Conv1d(
112
+ in_channels, filter_channels, kernel_size, padding=kernel_size // 2
113
+ )
114
+ self.norm_1 = LayerNorm(filter_channels)
115
+
116
+ self.module_list = nn.ModuleList()
117
+ self.module_list.append(self.conv_1)
118
+ self.module_list.append(nn.ReLU())
119
+ self.module_list.append(self.norm_1)
120
+ self.module_list.append(self.drop)
121
+
122
+ for i in range(12):
123
+ self.module_list.append(nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2))
124
+ self.module_list.append(nn.ReLU())
125
+ self.module_list.append(LayerNorm(filter_channels))
126
+ self.module_list.append(nn.Dropout(p_dropout))
127
+
128
+
129
+ # attention layer every 3 layers
130
+ self.attn_list = nn.ModuleList()
131
+ for i in range(4):
132
+ self.attn_list.append(
133
+ Encoder(
134
+ filter_channels,
135
+ filter_channels,
136
+ 8,
137
+ 10,
138
+ 3,
139
+ p_dropout=p_dropout,
140
+ )
141
+ )
142
+
143
+ for i in range(12):  # walk the conv blocks above; the original range(30) would overrun attn_list
144
+ if (i + 1) % 3 == 0:  # parenthesized: without the parentheses this condition was never true
145
+ self.module_list.append(self.attn_list[i//3])
146
+
147
+ self.proj = nn.Conv1d(filter_channels, 1, 1)
148
+
149
+ def forward(self, x, x_mask):
150
+ x = torch.detach(x)
151
+ for layer in self.module_list:
152
+ x = layer(x * x_mask, x_mask) if isinstance(layer, Encoder) else layer(x * x_mask)  # Encoder blocks also expect the mask
153
+ x = self.proj(x * x_mask)
154
+ # x = torch.relu(x)
155
+ return x * x_mask
156
+
157
+ class RotaryPositionalEmbeddings(nn.Module):
158
+ """
159
+ ## RoPE module
160
+
161
+ Rotary encoding transforms pairs of features by rotating in the 2D plane.
162
+ That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
163
+ Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
164
+ by an angle depending on the position of the token.
165
+ """
166
+
167
+ def __init__(self, d: int, base: int = 10_000):
168
+ r"""
169
+ * `d` is the number of features $d$
170
+ * `base` is the constant used for calculating $\Theta$
171
+ """
172
+ super().__init__()
173
+
174
+ self.base = base
175
+ self.d = int(d)
176
+ self.cos_cached = None
177
+ self.sin_cached = None
178
+
179
+ def _build_cache(self, x: torch.Tensor):
180
+ r"""
181
+ Cache $\cos$ and $\sin$ values
182
+ """
183
+ # Return if cache is already built
184
+ if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
185
+ return
186
+
187
+ # Get sequence length
188
+ seq_len = x.shape[0]
189
+
190
+ # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
191
+ theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)
192
+
193
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
194
+ seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)
195
+
196
+ # Calculate the product of position index and $\theta_i$
197
+ idx_theta = torch.einsum("n,d->nd", seq_idx, theta)
198
+
199
+ # Concatenate so that for row $m$ we have
200
+ # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
201
+ idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)
202
+
203
+ # Cache them
204
+ self.cos_cached = idx_theta2.cos()[:, None, None, :]
205
+ self.sin_cached = idx_theta2.sin()[:, None, None, :]
206
+
207
+ def _neg_half(self, x: torch.Tensor):
208
+ # $\frac{d}{2}$
209
+ d_2 = self.d // 2
210
+
211
+ # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
212
+ return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)
213
+
214
+ def forward(self, x: torch.Tensor):
215
+ """
216
+ * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
217
+ """
218
+ # Cache $\cos$ and $\sin$ values
219
+ x = rearrange(x, "b h t d -> t b h d")
220
+
221
+ self._build_cache(x)
222
+
223
+ # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
224
+ x_rope, x_pass = x[..., : self.d], x[..., self.d :]
225
+
226
+ # Calculate
227
+ # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
228
+ neg_half_x = self._neg_half(x_rope)
229
+
230
+ x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])
231
+
232
+ return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")
233
+
234
+
235
+ class MultiHeadAttention(nn.Module):
236
+ def __init__(
237
+ self,
238
+ channels,
239
+ out_channels,
240
+ n_heads,
241
+ heads_share=True,
242
+ p_dropout=0.0,
243
+ proximal_bias=False,
244
+ proximal_init=False,
245
+ ):
246
+ super().__init__()
247
+ assert channels % n_heads == 0
248
+
249
+ self.channels = channels
250
+ self.out_channels = out_channels
251
+ self.n_heads = n_heads
252
+ self.heads_share = heads_share
253
+ self.proximal_bias = proximal_bias
254
+ self.p_dropout = p_dropout
255
+ self.attn = None
256
+
257
+ self.k_channels = channels // n_heads
258
+ self.conv_q = torch.nn.Conv1d(channels, channels, 1)
259
+ self.conv_k = torch.nn.Conv1d(channels, channels, 1)
260
+ self.conv_v = torch.nn.Conv1d(channels, channels, 1)
261
+
262
+ # from https://nn.labml.ai/transformers/rope/index.html
263
+ self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
264
+ self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
265
+
266
+ self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
267
+ self.drop = torch.nn.Dropout(p_dropout)
268
+
269
+ torch.nn.init.xavier_uniform_(self.conv_q.weight)
270
+ torch.nn.init.xavier_uniform_(self.conv_k.weight)
271
+ if proximal_init:
272
+ self.conv_k.weight.data.copy_(self.conv_q.weight.data)
273
+ self.conv_k.bias.data.copy_(self.conv_q.bias.data)
274
+ torch.nn.init.xavier_uniform_(self.conv_v.weight)
275
+
276
+ def forward(self, x, c, attn_mask=None):
277
+ q = self.conv_q(x)
278
+ k = self.conv_k(c)
279
+ v = self.conv_v(c)
280
+
281
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
282
+
283
+ x = self.conv_o(x)
284
+ return x
285
+
286
+ def attention(self, query, key, value, mask=None):
287
+ b, d, t_s, t_t = (*key.size(), query.size(2))
288
+ query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
289
+ key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
290
+ value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)
291
+
292
+ query = self.query_rotary_pe(query)
293
+ key = self.key_rotary_pe(key)
294
+
295
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
296
+
297
+ if self.proximal_bias:
298
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
299
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
300
+ if mask is not None:
301
+ scores = scores.masked_fill(mask == 0, -1e4)
302
+ p_attn = torch.nn.functional.softmax(scores, dim=-1)
303
+ p_attn = self.drop(p_attn)
304
+ output = torch.matmul(p_attn, value)
305
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t)
306
+ return output, p_attn
307
+
308
+ @staticmethod
309
+ def _attention_bias_proximal(length):
310
+ r = torch.arange(length, dtype=torch.float32)
311
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
312
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
313
+
314
+
315
+ class FFN(nn.Module):
316
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
317
+ super().__init__()
318
+ self.in_channels = in_channels
319
+ self.out_channels = out_channels
320
+ self.filter_channels = filter_channels
321
+ self.kernel_size = kernel_size
322
+ self.p_dropout = p_dropout
323
+
324
+ self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
325
+ self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2)
326
+ self.drop = torch.nn.Dropout(p_dropout)
327
+
328
+ def forward(self, x, x_mask):
329
+ x = self.conv_1(x * x_mask)
330
+ x = torch.relu(x)
331
+ x = self.drop(x)
332
+ x = self.conv_2(x * x_mask)
333
+ return x * x_mask
334
+
335
+
336
+ class Encoder(nn.Module):
337
+ def __init__(
338
+ self,
339
+ hidden_channels,
340
+ filter_channels,
341
+ n_heads,
342
+ n_layers,
343
+ kernel_size=1,
344
+ p_dropout=0.0,
345
+ **kwargs,
346
+ ):
347
+ super().__init__()
348
+ self.hidden_channels = hidden_channels
349
+ self.filter_channels = filter_channels
350
+ self.n_heads = n_heads
351
+ self.n_layers = n_layers
352
+ self.kernel_size = kernel_size
353
+ self.p_dropout = p_dropout
354
+
355
+ self.drop = torch.nn.Dropout(p_dropout)
356
+ self.attn_layers = torch.nn.ModuleList()
357
+ self.norm_layers_1 = torch.nn.ModuleList()
358
+ self.ffn_layers = torch.nn.ModuleList()
359
+ self.norm_layers_2 = torch.nn.ModuleList()
360
+ for _ in range(self.n_layers):
361
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
362
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
363
+ self.ffn_layers.append(
364
+ FFN(
365
+ hidden_channels,
366
+ hidden_channels,
367
+ filter_channels,
368
+ kernel_size,
369
+ p_dropout=p_dropout,
370
+ )
371
+ )
372
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
373
+
374
+ def forward(self, x, x_mask):
375
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
376
+ for i in range(self.n_layers):
377
+ x = x * x_mask
378
+ y = self.attn_layers[i](x, x, attn_mask)
379
+ y = self.drop(y)
380
+ x = self.norm_layers_1[i](x + y)
381
+ y = self.ffn_layers[i](x, x_mask)
382
+ y = self.drop(y)
383
+ x = self.norm_layers_2[i](x + y)
384
+ x = x * x_mask
385
+ return x
386
+
387
+ class Decoder(nn.Module):
388
+ def __init__(
389
+ self,
390
+ hidden_channels,
391
+ filter_channels,
392
+ n_heads,
393
+ n_layers,
394
+ kernel_size=1,
395
+ p_dropout=0.0,
396
+ proximal_bias=False,
397
+ proximal_init=True,
398
+ **kwargs
399
+ ):
400
+ super().__init__()
401
+ self.hidden_channels = hidden_channels
402
+ self.filter_channels = filter_channels
403
+ self.n_heads = n_heads
404
+ self.n_layers = n_layers
405
+ self.kernel_size = kernel_size
406
+ self.p_dropout = p_dropout
407
+ self.proximal_bias = proximal_bias
408
+ self.proximal_init = proximal_init
409
+
410
+ self.drop = nn.Dropout(p_dropout)
411
+ self.self_attn_layers = nn.ModuleList()
412
+ self.norm_layers_0 = nn.ModuleList()
413
+ self.encdec_attn_layers = nn.ModuleList()
414
+ self.norm_layers_1 = nn.ModuleList()
415
+ self.ffn_layers = nn.ModuleList()
416
+ self.norm_layers_2 = nn.ModuleList()
417
+ for i in range(self.n_layers):
418
+ self.self_attn_layers.append(
419
+ MultiHeadAttention(
420
+ hidden_channels,
421
+ hidden_channels,
422
+ n_heads,
423
+ p_dropout=p_dropout
424
+ )
425
+ )
426
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
427
+ self.encdec_attn_layers.append(
428
+ MultiHeadAttention(
429
+ hidden_channels,
430
+ hidden_channels,
431
+ n_heads,
432
+ p_dropout=p_dropout
433
+ )
434
+ )
435
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
436
+ self.ffn_layers.append(
437
+ FFN(
438
+ hidden_channels,
439
+ hidden_channels,
440
+ filter_channels,
441
+ kernel_size,
442
+ p_dropout=p_dropout,
443
+ )
444
+ )
445
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
446
+
447
+ def forward(self, x, x_mask, h, h_mask):
448
+ """
449
+ x: decoder input
450
+ h: encoder output
451
+ """
452
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
453
+ device=x.device, dtype=x.dtype
454
+ )
455
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
456
+ x = x * x_mask
457
+ for i in range(self.n_layers):
458
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
459
+ y = self.drop(y)
460
+ x = self.norm_layers_0[i](x + y)
461
+
462
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
463
+ y = self.drop(y)
464
+ x = self.norm_layers_1[i](x + y)
465
+
466
+ y = self.ffn_layers[i](x, x_mask)
467
+ y = self.drop(y)
468
+ x = self.norm_layers_2[i](x + y)
469
+ x = x * x_mask
470
+ return x
471
+
472
+ class TextEncoder(nn.Module):
473
+ def __init__(
474
+ self,
475
+ encoder_type,
476
+ encoder_params,
477
+ duration_predictor_params,
478
+ n_vocab,
479
+ speech_in_channels,
480
+ ):
481
+ super().__init__()
482
+ self.encoder_type = encoder_type
483
+ self.n_vocab = n_vocab
484
+ self.n_feats = encoder_params.n_feats
485
+ self.n_channels = encoder_params.n_channels
486
+
487
+ self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
488
+ torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)
489
+
490
+ self.speech_in_channels = speech_in_channels
491
+ self.speech_out_channels = self.n_channels
492
+ # self.speech_prompt_proj = torch.nn.Conv1d(self.speech_in_channels, self.speech_out_channels, 1)
493
+ self.speech_prompt_proj = PosteriorEncoder(
494
+ self.speech_in_channels,
495
+ self.speech_out_channels,
496
+ self.speech_out_channels,
497
+ 1,
498
+ 1,
499
+ 1,
500
+ gin_channels=0,
501
+ )
502
+
503
+ self.prenet = ConvReluNorm(
504
+ self.n_channels,
505
+ self.n_channels,
506
+ self.n_channels,
507
+ kernel_size=5,
508
+ n_layers=3,
509
+ p_dropout=0,
510
+ )
511
+
512
+ # self.speech_prompt_encoder = Encoder(
513
+ # encoder_params.n_channels,
514
+ # encoder_params.filter_channels,
515
+ # encoder_params.n_heads,
516
+ # encoder_params.n_layers,
517
+ # encoder_params.kernel_size,
518
+ # encoder_params.p_dropout,
519
+ # )
520
+
521
+ self.text_base_encoder = Encoder(
522
+ encoder_params.n_channels,
523
+ encoder_params.filter_channels,
524
+ encoder_params.n_heads,
525
+ encoder_params.n_layers,
526
+ encoder_params.kernel_size,
527
+ encoder_params.p_dropout,
528
+ )
529
+
530
+ # self.decoder = Decoder(
531
+ # encoder_params.n_channels,
532
+ # encoder_params.filter_channels,
533
+ # encoder_params.n_heads,
534
+ # encoder_params.n_layers,
535
+ # encoder_params.kernel_size,
536
+ # encoder_params.p_dropout,
537
+ # )
538
+
539
+ self.transformerblock = BasicTransformerBlock(
540
+ encoder_params.n_channels,
541
+ encoder_params.n_heads,
542
+ encoder_params.n_channels // encoder_params.n_heads,
543
+ encoder_params.p_dropout,
544
+ encoder_params.n_channels,
545
+ activation_fn="gelu",
546
+ attention_bias=False,
547
+ only_cross_attention=False,
548
+ double_self_attention=False,
549
+ upcast_attention=False,
550
+ norm_elementwise_affine=True,
551
+ norm_type="layer_norm",
552
+ final_dropout=False,
553
+ )
554
+ self.proj_m = torch.nn.Conv1d(self.n_channels, self.n_feats, 1)
555
+
556
+ self.proj_w = DurationPredictor(
557
+ self.n_channels,
558
+ duration_predictor_params.filter_channels_dp,
559
+ duration_predictor_params.kernel_size,
560
+ duration_predictor_params.p_dropout,
561
+ )
562
+ # self.proj_w = DurationPredictorNS2(
563
+ # self.n_channels,
564
+ # duration_predictor_params.filter_channels_dp,
565
+ # duration_predictor_params.kernel_size,
566
+ # duration_predictor_params.p_dropout,
567
+ # )
568
+
569
+ def forward(
570
+ self,
571
+ x_input,
572
+ x_lengths,
573
+ speech_prompt,
574
+ ):
575
+ """Run forward pass to the transformer based encoder and duration predictor
576
+
577
+ Args:
578
+ x_input (torch.Tensor): text input
579
+ shape: (batch_size, max_text_length)
580
+ x_lengths (torch.Tensor): text input lengths
581
+ shape: (batch_size,)
582
+ speech_prompt (torch.Tensor): speech prompt input
583
+
584
+ Returns:
585
+ mu (torch.Tensor): average output of the encoder
586
+ shape: (batch_size, n_feats, max_text_length)
587
+ logw (torch.Tensor): log duration predicted by the duration predictor
588
+ shape: (batch_size, 1, max_text_length)
589
+ x_mask (torch.Tensor): mask for the text input
590
+ shape: (batch_size, 1, max_text_length)
591
+ """
592
+ x_emb = self.emb(x_input) * math.sqrt(self.n_channels)
593
+ x_emb = torch.transpose(x_emb, 1, -1)
594
+ x_speech_lengths = x_lengths + speech_prompt.size(2)
595
+ speech_lengths = x_speech_lengths - x_lengths
596
+ # speech_prompt_proj = self.speech_prompt_proj(speech_prompt)
597
+ speech_prompt_proj, speech_mask = self.speech_prompt_proj(speech_prompt, speech_lengths)
598
+ x_speech_cat = torch.cat([speech_prompt_proj, x_emb], dim=2)
599
+ x_speech_mask = torch.unsqueeze(sequence_mask(x_speech_lengths, x_speech_cat.size(2)), 1).to(x_speech_cat.dtype)
600
+
601
+ x_prenet = self.prenet(x_speech_cat, x_speech_mask)
602
+ # split speech prompt and text input
603
+ speech_split = x_prenet[:, :, :speech_prompt_proj.size(2)]
604
+ x_split = x_prenet[:, :, speech_prompt_proj.size(2):]
605
+ x_split_mask = torch.unsqueeze(sequence_mask(x_lengths, x_split.size(2)), 1).to(x_split.dtype)
606
+ speech_lengths = x_speech_lengths - x_lengths
607
+ speech_mask = torch.unsqueeze(sequence_mask(speech_lengths, speech_split.size(2)), 1).to(x_split.dtype)
608
+
609
+ x_split = self.transformerblock(x_split.transpose(1,2), x_split_mask, speech_split.transpose(1,2), speech_mask)
610
+ x_split = x_split.transpose(1,2)
611
+
612
+ # x_split_mask = torch.unsqueeze(sequence_mask(x_lengths, x_split.size(2)), 1).to(x.dtype)
613
+
614
+ mu = self.proj_m(x_split) * x_split_mask
615
+ x_dp = torch.detach(x_split)
616
+ logw = self.proj_w(x_dp, x_split_mask)
617
+
618
+ return mu, logw, x_split_mask
pflow/models/components/test.py ADDED
@@ -0,0 +1,6 @@
1
+ from pflow.hifigan.meldataset import mel_spectrogram
2
+ import torch
3
+
4
+ audio = torch.randn(2,1, 1000)
5
+ mels = mel_spectrogram(audio, 1024, 80, 22050, 256, 1024, 0, 8000, center=False)
6
+ print(mels.shape)
pflow/models/components/text_encoder.py ADDED
@@ -0,0 +1,425 @@
1
+ """ from https://github.com/jaywalnut310/glow-tts """
2
+
3
+ import math
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from einops import rearrange
8
+
9
+ import pflow.utils as utils
10
+ from pflow.utils.model import sequence_mask
11
+
12
+ log = utils.get_pylogger(__name__)
13
+
14
+
15
+ class LayerNorm(nn.Module):
16
+ def __init__(self, channels, eps=1e-4):
17
+ super().__init__()
18
+ self.channels = channels
19
+ self.eps = eps
20
+
21
+ self.gamma = torch.nn.Parameter(torch.ones(channels))
22
+ self.beta = torch.nn.Parameter(torch.zeros(channels))
23
+
24
+ def forward(self, x):
25
+ n_dims = len(x.shape)
26
+ mean = torch.mean(x, 1, keepdim=True)
27
+ variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
28
+
29
+ x = (x - mean) * torch.rsqrt(variance + self.eps)
30
+
31
+ shape = [1, -1] + [1] * (n_dims - 2)
32
+ x = x * self.gamma.view(*shape) + self.beta.view(*shape)
33
+ return x
34
+
35
+
36
+ class ConvReluNorm(nn.Module):
37
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
38
+ super().__init__()
39
+ self.in_channels = in_channels
40
+ self.hidden_channels = hidden_channels
41
+ self.out_channels = out_channels
42
+ self.kernel_size = kernel_size
43
+ self.n_layers = n_layers
44
+ self.p_dropout = p_dropout
45
+
46
+ self.conv_layers = torch.nn.ModuleList()
47
+ self.norm_layers = torch.nn.ModuleList()
48
+ self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
49
+ self.norm_layers.append(LayerNorm(hidden_channels))
50
+ self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
51
+ for _ in range(n_layers - 1):
52
+ self.conv_layers.append(
53
+ torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
54
+ )
55
+ self.norm_layers.append(LayerNorm(hidden_channels))
56
+ self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
57
+ self.proj.weight.data.zero_()
58
+ self.proj.bias.data.zero_()
59
+
60
+ def forward(self, x, x_mask):
61
+ x_org = x
62
+ for i in range(self.n_layers):
63
+ x = self.conv_layers[i](x * x_mask)
64
+ x = self.norm_layers[i](x)
65
+ x = self.relu_drop(x)
66
+ x = x_org + self.proj(x)
67
+ return x * x_mask
68
+
69
+
70
+ class DurationPredictor(nn.Module):
71
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
72
+ super().__init__()
73
+ self.in_channels = in_channels
74
+ self.filter_channels = filter_channels
75
+ self.p_dropout = p_dropout
76
+
77
+ self.drop = torch.nn.Dropout(p_dropout)
78
+ self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
79
+ self.norm_1 = LayerNorm(filter_channels)
80
+ self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
81
+ self.norm_2 = LayerNorm(filter_channels)
82
+ self.proj = torch.nn.Conv1d(filter_channels, 1, 1)
83
+
84
+ def forward(self, x, x_mask):
85
+ x = self.conv_1(x * x_mask)
86
+ x = torch.relu(x)
87
+ x = self.norm_1(x)
88
+ x = self.drop(x)
89
+ x = self.conv_2(x * x_mask)
90
+ x = torch.relu(x)
91
+ x = self.norm_2(x)
92
+ x = self.drop(x)
93
+ x = self.proj(x * x_mask)
94
+ return x * x_mask
95
+
96
+
97
+ class RotaryPositionalEmbeddings(nn.Module):
98
+ """
99
+ ## RoPE module
100
+
101
+ Rotary encoding transforms pairs of features by rotating in the 2D plane.
102
+ That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
103
+ Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
104
+ by an angle depending on the position of the token.
105
+ """
106
+
107
+ def __init__(self, d: int, base: int = 10_000):
108
+ r"""
109
+ * `d` is the number of features $d$
110
+ * `base` is the constant used for calculating $\Theta$
111
+ """
112
+ super().__init__()
113
+
114
+ self.base = base
115
+ self.d = int(d)
116
+ self.cos_cached = None
117
+ self.sin_cached = None
118
+
119
+ def _build_cache(self, x: torch.Tensor):
120
+ r"""
121
+ Cache $\cos$ and $\sin$ values
122
+ """
123
+ # Return if cache is already built
124
+ if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
125
+ return
126
+
127
+ # Get sequence length
128
+ seq_len = x.shape[0]
129
+
130
+ # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
131
+ theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)
132
+
133
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
134
+ seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)
135
+
136
+ # Calculate the product of position index and $\theta_i$
137
+ idx_theta = torch.einsum("n,d->nd", seq_idx, theta)
138
+
139
+ # Concatenate so that for row $m$ we have
140
+ # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
141
+ idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)
142
+
143
+ # Cache them
144
+ self.cos_cached = idx_theta2.cos()[:, None, None, :]
145
+ self.sin_cached = idx_theta2.sin()[:, None, None, :]
146
+
147
+ def _neg_half(self, x: torch.Tensor):
148
+ # $\frac{d}{2}$
149
+ d_2 = self.d // 2
150
+
151
+ # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
152
+ return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)
153
+
154
+ def forward(self, x: torch.Tensor):
155
+ """
156
+ * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
157
+ """
158
+ # Cache $\cos$ and $\sin$ values
159
+ x = rearrange(x, "b h t d -> t b h d")
160
+
161
+ self._build_cache(x)
162
+
163
+ # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
164
+ x_rope, x_pass = x[..., : self.d], x[..., self.d :]
165
+
166
+ # Calculate
167
+ # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
168
+ neg_half_x = self._neg_half(x_rope)
169
+
170
+ x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])
171
+
172
+ return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")
173
+
174
+
175
+ class MultiHeadAttention(nn.Module):
176
+ def __init__(
177
+ self,
178
+ channels,
179
+ out_channels,
180
+ n_heads,
181
+ heads_share=True,
182
+ p_dropout=0.0,
183
+ proximal_bias=False,
184
+ proximal_init=False,
185
+ ):
186
+ super().__init__()
187
+ assert channels % n_heads == 0
188
+
189
+ self.channels = channels
190
+ self.out_channels = out_channels
191
+ self.n_heads = n_heads
192
+ self.heads_share = heads_share
193
+ self.proximal_bias = proximal_bias
194
+ self.p_dropout = p_dropout
195
+ self.attn = None
196
+
197
+ self.k_channels = channels // n_heads
198
+ self.conv_q = torch.nn.Conv1d(channels, channels, 1)
199
+ self.conv_k = torch.nn.Conv1d(channels, channels, 1)
200
+ self.conv_v = torch.nn.Conv1d(channels, channels, 1)
201
+
202
+ # from https://nn.labml.ai/transformers/rope/index.html
203
+ self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
204
+ self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
205
+
206
+ self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
207
+ self.drop = torch.nn.Dropout(p_dropout)
208
+
209
+ torch.nn.init.xavier_uniform_(self.conv_q.weight)
210
+ torch.nn.init.xavier_uniform_(self.conv_k.weight)
211
+ if proximal_init:
212
+ self.conv_k.weight.data.copy_(self.conv_q.weight.data)
213
+ self.conv_k.bias.data.copy_(self.conv_q.bias.data)
214
+ torch.nn.init.xavier_uniform_(self.conv_v.weight)
215
+
216
+ def forward(self, x, c, attn_mask=None):
217
+ q = self.conv_q(x)
218
+ k = self.conv_k(c)
219
+ v = self.conv_v(c)
220
+
221
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
222
+
223
+ x = self.conv_o(x)
224
+ return x
225
+
226
+ def attention(self, query, key, value, mask=None):
227
+ b, d, t_s, t_t = (*key.size(), query.size(2))
228
+ query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
229
+ key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
230
+ value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)
231
+
232
+ query = self.query_rotary_pe(query)
233
+ key = self.key_rotary_pe(key)
234
+
235
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
236
+
237
+ if self.proximal_bias:
238
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
239
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
240
+ if mask is not None:
241
+ scores = scores.masked_fill(mask == 0, -1e4)
242
+ p_attn = torch.nn.functional.softmax(scores, dim=-1)
243
+ p_attn = self.drop(p_attn)
244
+ output = torch.matmul(p_attn, value)
245
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t)
246
+ return output, p_attn
247
+
248
+ @staticmethod
249
+ def _attention_bias_proximal(length):
250
+ r = torch.arange(length, dtype=torch.float32)
251
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
252
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
253
+
254
+
255
+ class FFN(nn.Module):
256
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
257
+ super().__init__()
258
+ self.in_channels = in_channels
259
+ self.out_channels = out_channels
260
+ self.filter_channels = filter_channels
261
+ self.kernel_size = kernel_size
262
+ self.p_dropout = p_dropout
263
+
264
+ self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
265
+ self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2)
266
+ self.drop = torch.nn.Dropout(p_dropout)
267
+
268
+ def forward(self, x, x_mask):
269
+ x = self.conv_1(x * x_mask)
270
+ x = torch.relu(x)
271
+ x = self.drop(x)
272
+ x = self.conv_2(x * x_mask)
273
+ return x * x_mask
274
+
275
+
276
+ class Encoder(nn.Module):
277
+ def __init__(
278
+ self,
279
+ hidden_channels,
280
+ filter_channels,
281
+ n_heads,
282
+ n_layers,
283
+ kernel_size=1,
284
+ p_dropout=0.0,
285
+ **kwargs,
286
+ ):
287
+ super().__init__()
288
+ self.hidden_channels = hidden_channels
289
+ self.filter_channels = filter_channels
290
+ self.n_heads = n_heads
291
+ self.n_layers = n_layers
292
+ self.kernel_size = kernel_size
293
+ self.p_dropout = p_dropout
294
+
295
+ self.drop = torch.nn.Dropout(p_dropout)
296
+ self.attn_layers = torch.nn.ModuleList()
297
+ self.norm_layers_1 = torch.nn.ModuleList()
298
+ self.ffn_layers = torch.nn.ModuleList()
299
+ self.norm_layers_2 = torch.nn.ModuleList()
300
+ for _ in range(self.n_layers):
301
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
302
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
303
+ self.ffn_layers.append(
304
+ FFN(
305
+ hidden_channels,
306
+ hidden_channels,
307
+ filter_channels,
308
+ kernel_size,
309
+ p_dropout=p_dropout,
310
+ )
311
+ )
312
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
313
+
314
+ def forward(self, x, x_mask):
315
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
316
+ for i in range(self.n_layers):
317
+ x = x * x_mask
318
+ y = self.attn_layers[i](x, x, attn_mask)
319
+ y = self.drop(y)
320
+ x = self.norm_layers_1[i](x + y)
321
+ y = self.ffn_layers[i](x, x_mask)
322
+ y = self.drop(y)
323
+ x = self.norm_layers_2[i](x + y)
324
+ x = x * x_mask
325
+ return x
326
+
327
+
328
+ class TextEncoder(nn.Module):
329
+ def __init__(
330
+ self,
331
+ encoder_type,
332
+ encoder_params,
333
+ duration_predictor_params,
334
+ n_vocab,
335
+ n_spks=1,
336
+ spk_emb_dim=128,
337
+ ):
338
+ super().__init__()
339
+ self.encoder_type = encoder_type
340
+ self.n_vocab = n_vocab
341
+ self.n_feats = encoder_params.n_feats
342
+ self.n_channels = encoder_params.n_channels
343
+ self.spk_emb_dim = spk_emb_dim
344
+ self.n_spks = n_spks
345
+
346
+ self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
347
+ torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)
348
+
349
+ if encoder_params.prenet:
350
+ self.prenet = ConvReluNorm(
351
+ self.n_channels,
352
+ self.n_channels,
353
+ self.n_channels,
354
+ kernel_size=5,
355
+ n_layers=3,
356
+ p_dropout=0.5,
357
+ )
358
+ else:
359
+ self.prenet = lambda x, x_mask: x
360
+
361
+ self.encoder = Encoder(
362
+ encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
363
+ encoder_params.filter_channels,
364
+ encoder_params.n_heads,
365
+ encoder_params.n_layers,
366
+ encoder_params.kernel_size,
367
+ encoder_params.p_dropout,
368
+ )
369
+
370
+ self.encoder_dp = Encoder(
371
+ encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
372
+ encoder_params.filter_channels,
373
+ encoder_params.n_heads,
374
+ encoder_params.n_layers,
375
+ encoder_params.kernel_size,
376
+ encoder_params.p_dropout,
377
+ )
378
+
379
+ self.proj_m = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1)
380
+ # self.proj_v = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1)
381
+
382
+ self.proj_w = DurationPredictor(
383
+ self.n_channels + (spk_emb_dim if n_spks > 1 else 0),
384
+ duration_predictor_params.filter_channels_dp,
385
+ duration_predictor_params.kernel_size,
386
+ duration_predictor_params.p_dropout,
387
+ )
388
+
389
+ def forward(self, x, x_lengths, spks=None):
390
+ """Run forward pass to the transformer based encoder and duration predictor
391
+
392
+ Args:
393
+ x (torch.Tensor): text input
394
+ shape: (batch_size, max_text_length)
395
+ x_lengths (torch.Tensor): text input lengths
396
+ shape: (batch_size,)
397
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
398
+ shape: (batch_size,)
399
+
400
+ Returns:
401
+ mu (torch.Tensor): average output of the encoder
402
+ shape: (batch_size, n_feats, max_text_length)
403
+ logw (torch.Tensor): log duration predicted by the duration predictor
404
+ shape: (batch_size, 1, max_text_length)
405
+ x_mask (torch.Tensor): mask for the text input
406
+ shape: (batch_size, 1, max_text_length)
407
+ """
408
+ x = self.emb(x) * math.sqrt(self.n_channels)
409
+ x = torch.transpose(x, 1, -1)
410
+ x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
411
+
412
+ x = self.prenet(x, x_mask)
413
+ if self.n_spks > 1:
414
+ x = torch.cat([x, spks.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1)
415
+ x_dp = torch.detach(x)
416
+ x_dp = self.encoder_dp(x_dp, x_mask)
417
+
418
+ x = self.encoder(x, x_mask)
419
+ mu = self.proj_m(x) * x_mask
420
+ # logs = self.proj_v(x) * x_mask
421
+
422
+ # x_dp = torch.detach(x)
423
+ logw = self.proj_w(x_dp, x_mask)
424
+
425
+ return mu, logw, x_mask
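# ---------------------------------------------------------------------------
# Editor's sketch (not part of the commit): single-speaker usage of this
# encoder; hyper-parameter values are illustrative placeholders, not the
# project's actual configuration.
def _text_encoder_example():
    from types import SimpleNamespace

    import torch

    enc_params = SimpleNamespace(n_feats=80, n_channels=192, filter_channels=768,
                                 n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
                                 prenet=True)
    dp_params = SimpleNamespace(filter_channels_dp=256, kernel_size=3, p_dropout=0.1)
    encoder = TextEncoder("encoder", enc_params, dp_params, n_vocab=178)

    tokens = torch.randint(0, 178, (2, 60))       # (batch, max_text_length)
    token_lengths = torch.tensor([60, 45])
    mu, logw, x_mask = encoder(tokens, token_lengths)   # spks is only needed when n_spks > 1
    print(mu.shape, logw.shape, x_mask.shape)           # (2, 80, 60) (2, 1, 60) (2, 1, 60)
# ---------------------------------------------------------------------------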
pflow/models/components/transformer.py ADDED
@@ -0,0 +1,316 @@
1
+ from typing import Any, Dict, Optional
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from diffusers.models.attention import (
6
+ GEGLU,
7
+ GELU,
8
+ AdaLayerNorm,
9
+ AdaLayerNormZero,
10
+ ApproximateGELU,
11
+ )
12
+ from diffusers.models.attention_processor import Attention
13
+ from diffusers.models.lora import LoRACompatibleLinear
14
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
15
+
16
+
17
+ class SnakeBeta(nn.Module):
18
+ """
19
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
20
+ Shape:
21
+ - Input: (B, C, T)
22
+ - Output: (B, C, T), same shape as the input
23
+ Parameters:
24
+ - alpha - trainable parameter that controls frequency
25
+ - beta - trainable parameter that controls magnitude
26
+ References:
27
+ - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
28
+ https://arxiv.org/abs/2006.08195
29
+ Examples:
30
+ >>> a1 = snakebeta(256)
31
+ >>> x = torch.randn(256)
32
+ >>> x = a1(x)
33
+ """
34
+
35
+ def __init__(self, in_features, out_features, alpha=1.0, alpha_trainable=True, alpha_logscale=True):
36
+ """
37
+ Initialization.
38
+ INPUT:
39
+ - in_features: shape of the input
40
+ - alpha - trainable parameter that controls frequency
41
+ - beta - trainable parameter that controls magnitude
42
+ alpha is initialized to 1 by default, higher values = higher-frequency.
43
+ beta is initialized to 1 by default, higher values = higher-magnitude.
44
+ alpha will be trained along with the rest of your model.
45
+ """
46
+ super().__init__()
47
+ self.in_features = out_features if isinstance(out_features, list) else [out_features]
48
+ self.proj = LoRACompatibleLinear(in_features, out_features)
49
+
50
+ # initialize alpha
51
+ self.alpha_logscale = alpha_logscale
52
+ if self.alpha_logscale: # log scale alphas initialized to zeros
53
+ self.alpha = nn.Parameter(torch.zeros(self.in_features) * alpha)
54
+ self.beta = nn.Parameter(torch.zeros(self.in_features) * alpha)
55
+ else: # linear scale alphas initialized to ones
56
+ self.alpha = nn.Parameter(torch.ones(self.in_features) * alpha)
57
+ self.beta = nn.Parameter(torch.ones(self.in_features) * alpha)
58
+
59
+ self.alpha.requires_grad = alpha_trainable
60
+ self.beta.requires_grad = alpha_trainable
61
+
62
+ self.no_div_by_zero = 0.000000001
63
+
64
+ def forward(self, x):
65
+ """
66
+ Forward pass of the function.
67
+ Applies the function to the input elementwise.
68
+ SnakeBeta(x) := x + 1/b * sin^2(x * a)
69
+ """
70
+ x = self.proj(x)
71
+ if self.alpha_logscale:
72
+ alpha = torch.exp(self.alpha)
73
+ beta = torch.exp(self.beta)
74
+ else:
75
+ alpha = self.alpha
76
+ beta = self.beta
77
+
78
+ x = x + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(torch.sin(x * alpha), 2)
79
+
80
+ return x
81
+
82
+
83
+ class FeedForward(nn.Module):
84
+ r"""
85
+ A feed-forward layer.
86
+
87
+ Parameters:
88
+ dim (`int`): The number of channels in the input.
89
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
90
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
91
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
92
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
93
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
94
+ """
95
+
96
+ def __init__(
97
+ self,
98
+ dim: int,
99
+ dim_out: Optional[int] = None,
100
+ mult: int = 4,
101
+ dropout: float = 0.0,
102
+ activation_fn: str = "geglu",
103
+ final_dropout: bool = False,
104
+ ):
105
+ super().__init__()
106
+ inner_dim = int(dim * mult)
107
+ dim_out = dim_out if dim_out is not None else dim
108
+
109
+ if activation_fn == "gelu":
110
+ act_fn = GELU(dim, inner_dim)
111
+ if activation_fn == "gelu-approximate":
112
+ act_fn = GELU(dim, inner_dim, approximate="tanh")
113
+ elif activation_fn == "geglu":
114
+ act_fn = GEGLU(dim, inner_dim)
115
+ elif activation_fn == "geglu-approximate":
116
+ act_fn = ApproximateGELU(dim, inner_dim)
117
+ elif activation_fn == "snakebeta":
118
+ act_fn = SnakeBeta(dim, inner_dim)
119
+
120
+ self.net = nn.ModuleList([])
121
+ # project in
122
+ self.net.append(act_fn)
123
+ # project dropout
124
+ self.net.append(nn.Dropout(dropout))
125
+ # project out
126
+ self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
127
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
128
+ if final_dropout:
129
+ self.net.append(nn.Dropout(dropout))
130
+
131
+ def forward(self, hidden_states):
132
+ for module in self.net:
133
+ hidden_states = module(hidden_states)
134
+ return hidden_states
135
+
136
+
137
+ @maybe_allow_in_graph
138
+ class BasicTransformerBlock(nn.Module):
139
+ r"""
140
+ A basic Transformer block.
141
+
142
+ Parameters:
143
+ dim (`int`): The number of channels in the input and output.
144
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
145
+ attention_head_dim (`int`): The number of channels in each head.
146
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
147
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
148
+ only_cross_attention (`bool`, *optional*):
149
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
150
+ double_self_attention (`bool`, *optional*):
151
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
152
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
153
+ num_embeds_ada_norm (:
154
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
155
+ attention_bias (:
156
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
157
+ """
158
+
159
+ def __init__(
160
+ self,
161
+ dim: int,
162
+ num_attention_heads: int,
163
+ attention_head_dim: int,
164
+ dropout=0.0,
165
+ cross_attention_dim: Optional[int] = None,
166
+ activation_fn: str = "geglu",
167
+ num_embeds_ada_norm: Optional[int] = None,
168
+ attention_bias: bool = False,
169
+ only_cross_attention: bool = False,
170
+ double_self_attention: bool = False,
171
+ upcast_attention: bool = False,
172
+ norm_elementwise_affine: bool = True,
173
+ norm_type: str = "layer_norm",
174
+ final_dropout: bool = False,
175
+ ):
176
+ super().__init__()
177
+ self.only_cross_attention = only_cross_attention
178
+
179
+ self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
180
+ self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
181
+
182
+ if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
183
+ raise ValueError(
184
+ f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
185
+ f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
186
+ )
187
+
188
+ # Define 3 blocks. Each block has its own normalization layer.
189
+ # 1. Self-Attn
190
+ if self.use_ada_layer_norm:
191
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
192
+ elif self.use_ada_layer_norm_zero:
193
+ self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
194
+ else:
195
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
196
+ self.attn1 = Attention(
197
+ query_dim=dim,
198
+ heads=num_attention_heads,
199
+ dim_head=attention_head_dim,
200
+ dropout=dropout,
201
+ bias=attention_bias,
202
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
203
+ upcast_attention=upcast_attention,
204
+ )
205
+
206
+ # 2. Cross-Attn
207
+ if cross_attention_dim is not None or double_self_attention:
208
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
209
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
210
+ # the second cross attention block.
211
+ self.norm2 = (
212
+ AdaLayerNorm(dim, num_embeds_ada_norm)
213
+ if self.use_ada_layer_norm
214
+ else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
215
+ )
216
+ self.attn2 = Attention(
217
+ query_dim=dim,
218
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
219
+ heads=num_attention_heads,
220
+ dim_head=attention_head_dim,
221
+ dropout=dropout,
222
+ bias=attention_bias,
223
+ upcast_attention=upcast_attention,
224
+ # scale_qk=False, # uncomment this to not to use flash attention
225
+ ) # is self-attn if encoder_hidden_states is none
226
+ else:
227
+ self.norm2 = None
228
+ self.attn2 = None
229
+
230
+ # 3. Feed-forward
231
+ self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
232
+ self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
233
+
234
+ # let chunk size default to None
235
+ self._chunk_size = None
236
+ self._chunk_dim = 0
237
+
238
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
239
+ # Sets chunk feed-forward
240
+ self._chunk_size = chunk_size
241
+ self._chunk_dim = dim
242
+
243
+ def forward(
244
+ self,
245
+ hidden_states: torch.FloatTensor,
246
+ attention_mask: Optional[torch.FloatTensor] = None,
247
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
248
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
249
+ timestep: Optional[torch.LongTensor] = None,
250
+ cross_attention_kwargs: Dict[str, Any] = None,
251
+ class_labels: Optional[torch.LongTensor] = None,
252
+ ):
253
+ # Notice that normalization is always applied before the real computation in the following blocks.
254
+ # 1. Self-Attention
255
+ if self.use_ada_layer_norm:
256
+ norm_hidden_states = self.norm1(hidden_states, timestep)
257
+ elif self.use_ada_layer_norm_zero:
258
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
259
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
260
+ )
261
+ else:
262
+ norm_hidden_states = self.norm1(hidden_states)
263
+
264
+ cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
265
+
266
+ attn_output = self.attn1(
267
+ norm_hidden_states,
268
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
269
+ attention_mask=encoder_attention_mask if self.only_cross_attention else attention_mask,
270
+ **cross_attention_kwargs,
271
+ )
272
+ if self.use_ada_layer_norm_zero:
273
+ attn_output = gate_msa.unsqueeze(1) * attn_output
274
+ hidden_states = attn_output + hidden_states
275
+
276
+ # 2. Cross-Attention
277
+ if self.attn2 is not None:
278
+ norm_hidden_states = (
279
+ self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
280
+ )
281
+
282
+ attn_output = self.attn2(
283
+ norm_hidden_states,
284
+ encoder_hidden_states=encoder_hidden_states,
285
+ attention_mask=encoder_attention_mask,
286
+ **cross_attention_kwargs,
287
+ )
288
+ hidden_states = attn_output + hidden_states
289
+
290
+ # 3. Feed-forward
291
+ norm_hidden_states = self.norm3(hidden_states)
292
+
293
+ if self.use_ada_layer_norm_zero:
294
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
295
+
296
+ if self._chunk_size is not None:
297
+ # "feed_forward_chunk_size" can be used to save memory
298
+ if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
299
+ raise ValueError(
300
+ f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
301
+ )
302
+
303
+ num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
304
+ ff_output = torch.cat(
305
+ [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
306
+ dim=self._chunk_dim,
307
+ )
308
+ else:
309
+ ff_output = self.ff(norm_hidden_states)
310
+
311
+ if self.use_ada_layer_norm_zero:
312
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
313
+
314
+ hidden_states = ff_output + hidden_states
315
+
316
+ return hidden_states
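
Note: a minimal shape-check sketch for the BasicTransformerBlock above (illustrative only; the dimensions, batch size and the "snakebeta" choice are arbitrary assumptions, and a working diffusers install is assumed):

    import torch
    from pflow.models.components.transformer import BasicTransformerBlock

    # 2 sequences of 50 frames with 192 channels, split over 2 heads of 96 dims (assumed sizes)
    block = BasicTransformerBlock(dim=192, num_attention_heads=2, attention_head_dim=96,
                                  activation_fn="snakebeta")
    hidden = torch.randn(2, 50, 192)
    out = block(hidden)   # self-attention followed by the SnakeBeta feed-forward
    print(out.shape)      # torch.Size([2, 50, 192])
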
pflow/models/components/vits_modules.py ADDED
@@ -0,0 +1,194 @@
1
+ # from https://github.com/jaywalnut310/vits
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ from pflow.models.components import commons
7
+
8
+ LRELU_SLOPE = 0.1
9
+
10
+ class LayerNorm(nn.Module):
11
+ def __init__(self, channels, eps=1e-5):
12
+ super().__init__()
13
+ self.channels = channels
14
+ self.eps = eps
15
+
16
+ self.gamma = nn.Parameter(torch.ones(channels))
17
+ self.beta = nn.Parameter(torch.zeros(channels))
18
+
19
+ def forward(self, x):
20
+ x = x.transpose(1, -1)
21
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
22
+ return x.transpose(1, -1)
23
+
24
+
25
+ class ConvReluNorm(nn.Module):
26
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
27
+ super().__init__()
28
+ self.in_channels = in_channels
29
+ self.hidden_channels = hidden_channels
30
+ self.out_channels = out_channels
31
+ self.kernel_size = kernel_size
32
+ self.n_layers = n_layers
33
+ self.p_dropout = p_dropout
34
+ assert n_layers > 1, "Number of layers should be larger than 1."
35
+
36
+ self.conv_layers = nn.ModuleList()
37
+ self.norm_layers = nn.ModuleList()
38
+ self.conv_layers.append(
39
+ nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)
40
+ )
41
+ self.norm_layers.append(LayerNorm(hidden_channels))
42
+ self.relu_drop = nn.Sequential(
43
+ nn.ReLU(),
44
+ nn.Dropout(p_dropout))
45
+ for _ in range(n_layers-1):
46
+ self.conv_layers.append(nn.Conv1d(
47
+ hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)
48
+ )
49
+ self.norm_layers.append(LayerNorm(hidden_channels))
50
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
51
+ self.proj.weight.data.zero_()
52
+ self.proj.bias.data.zero_()
53
+
54
+ def forward(self, x, x_mask):
55
+ x_org = x
56
+ for i in range(self.n_layers):
57
+ x = self.conv_layers[i](x * x_mask)
58
+ x = self.norm_layers[i](x)
59
+ x = self.relu_drop(x)
60
+ x = x_org + self.proj(x)
61
+ return x * x_mask
62
+
63
+
64
+ class DDSConv(nn.Module):
65
+ """Dialted and Depth-Separable Convolution"""
66
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
67
+ super().__init__()
68
+ self.channels = channels
69
+ self.kernel_size = kernel_size
70
+ self.n_layers = n_layers
71
+ self.p_dropout = p_dropout
72
+
73
+ self.drop = nn.Dropout(p_dropout)
74
+ self.convs_sep = nn.ModuleList()
75
+ self.convs_1x1 = nn.ModuleList()
76
+ self.norms_1 = nn.ModuleList()
77
+ self.norms_2 = nn.ModuleList()
78
+ for i in range(n_layers):
79
+ dilation = kernel_size ** i
80
+ padding = (kernel_size * dilation - dilation) // 2
81
+ self.convs_sep.append(
82
+ nn.Conv1d(
83
+ channels,
84
+ channels,
85
+ kernel_size,
86
+ groups=channels,
87
+ dilation=dilation,
88
+ padding=padding
89
+ )
90
+ )
91
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
92
+ self.norms_1.append(LayerNorm(channels))
93
+ self.norms_2.append(LayerNorm(channels))
94
+
95
+ def forward(self, x, x_mask, g=None):
96
+ if g is not None:
97
+ x = x + g
98
+ for i in range(self.n_layers):
99
+ y = self.convs_sep[i](x * x_mask)
100
+ y = self.norms_1[i](y)
101
+ y = F.gelu(y)
102
+ y = self.convs_1x1[i](y)
103
+ y = self.norms_2[i](y)
104
+ y = F.gelu(y)
105
+ y = self.drop(y)
106
+ x = x + y
107
+ return x * x_mask
108
+
109
+
110
+ class WN(torch.nn.Module):
111
+ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
112
+ super(WN, self).__init__()
113
+ assert(kernel_size % 2 == 1)
114
+ self.hidden_channels = hidden_channels
115
+ self.kernel_size = kernel_size
116
+ self.dilation_rate = dilation_rate
117
+ self.n_layers = n_layers
118
+ self.gin_channels = gin_channels
119
+ self.p_dropout = p_dropout
120
+
121
+ self.in_layers = torch.nn.ModuleList()
122
+ self.res_skip_layers = torch.nn.ModuleList()
123
+ self.drop = nn.Dropout(p_dropout)
124
+
125
+ if gin_channels != 0:
126
+ cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
127
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
128
+
129
+ for i in range(n_layers):
130
+ dilation = dilation_rate ** i
131
+ padding = int((kernel_size * dilation - dilation) / 2)
132
+ in_layer = torch.nn.Conv1d(
133
+ hidden_channels,
134
+ 2*hidden_channels,
135
+ kernel_size,
136
+ dilation=dilation,
137
+ padding=padding
138
+ )
139
+ in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
140
+ self.in_layers.append(in_layer)
141
+
142
+ # last one is not necessary
143
+ if i < n_layers - 1:
144
+ res_skip_channels = 2 * hidden_channels
145
+ else:
146
+ res_skip_channels = hidden_channels
147
+
148
+ res_skip_layer = torch.nn.Conv1d(
149
+ hidden_channels, res_skip_channels, 1
150
+ )
151
+ res_skip_layer = torch.nn.utils.weight_norm(
152
+ res_skip_layer, name='weight'
153
+ )
154
+ self.res_skip_layers.append(res_skip_layer)
155
+
156
+ def forward(self, x, x_mask, g=None, **kwargs):
157
+ output = torch.zeros_like(x)
158
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
159
+ if g is not None:
160
+ g = g.unsqueeze(-1)
161
+ g = self.cond_layer(g)
162
+
163
+ for i in range(self.n_layers):
164
+ x_in = self.in_layers[i](x)
165
+ if g is not None:
166
+ cond_offset = i * 2 * self.hidden_channels
167
+ g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
168
+ else:
169
+ g_l = torch.zeros_like(x_in)
170
+
171
+ acts = commons.fused_add_tanh_sigmoid_multiply(
172
+ x_in,
173
+ g_l,
174
+ n_channels_tensor
175
+ )
176
+ acts = self.drop(acts)
177
+
178
+ res_skip_acts = self.res_skip_layers[i](acts)
179
+ if i < self.n_layers - 1:
180
+ res_acts = res_skip_acts[:, :self.hidden_channels, :]
181
+ x = (x + res_acts) * x_mask
182
+ output = output + res_skip_acts[:, self.hidden_channels:, :]
183
+ else:
184
+ output = output + res_skip_acts
185
+ return output * x_mask
186
+
187
+ def remove_weight_norm(self):
188
+ if self.gin_channels != 0:
189
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
190
+ for l in self.in_layers:
191
+ torch.nn.utils.remove_weight_norm(l)
192
+ for l in self.res_skip_layers:
193
+ torch.nn.utils.remove_weight_norm(l)
194
+
pflow/models/components/vits_posterior.py ADDED
@@ -0,0 +1,43 @@
1
+ import torch.nn as nn
2
+ import torch
3
+
4
+ import pflow.models.components.vits_modules as modules
5
+ import pflow.models.components.commons as commons
6
+
7
+ class PosteriorEncoder(nn.Module):
8
+
9
+ def __init__(self,
10
+ in_channels,
11
+ out_channels,
12
+ hidden_channels,
13
+ kernel_size,
14
+ dilation_rate,
15
+ n_layers,
16
+ gin_channels=0):
17
+ super().__init__()
18
+ self.in_channels = in_channels
19
+ self.out_channels = out_channels
20
+ self.hidden_channels = hidden_channels
21
+ self.kernel_size = kernel_size
22
+ self.dilation_rate = dilation_rate
23
+ self.n_layers = n_layers
24
+ self.gin_channels = gin_channels
25
+
26
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
27
+ self.enc = modules.WN(hidden_channels,
28
+ kernel_size,
29
+ dilation_rate,
30
+ n_layers,
31
+ gin_channels=gin_channels)
32
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
33
+
34
+ def forward(self, x, x_lengths, g=None):
35
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)),
36
+ 1).to(x.dtype)
37
+ x = self.pre(x) * x_mask
38
+ x = self.enc(x, x_mask, g=g)
39
+ stats = self.proj(x) * x_mask
40
+ # m, logs = torch.split(stats, self.out_channels, dim=1)
41
+ # z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
42
+ # z = m * x_mask
43
+ return stats, x_mask
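
Note: a quick shape sketch for the PosteriorEncoder above (channel sizes below are assumptions taken from typical VITS settings, not from this repo's configs):

    import torch
    from pflow.models.components.vits_posterior import PosteriorEncoder

    enc = PosteriorEncoder(in_channels=80, out_channels=80, hidden_channels=192,
                           kernel_size=5, dilation_rate=1, n_layers=16)
    y = torch.randn(2, 80, 120)            # (batch, n_mels, frames)
    y_lengths = torch.tensor([120, 96])
    stats, y_mask = enc(y, y_lengths)      # per-frame stats and the length mask
    print(stats.shape, y_mask.shape)       # torch.Size([2, 80, 120]) torch.Size([2, 1, 120])
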
pflow/models/components/vits_wn_decoder.py ADDED
@@ -0,0 +1,79 @@
1
+ import math
2
+
3
+ import torch.nn as nn
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import pflow.models.components.vits_modules as modules
7
+ import pflow.models.components.commons as commons
8
+
9
+ class Mish(nn.Module):
10
+ def forward(self, x):
11
+ return x * torch.tanh(F.softplus(x))
12
+
13
+
14
+ class SinusoidalPosEmb(nn.Module):
15
+ def __init__(self, dim):
16
+ super(SinusoidalPosEmb, self).__init__()
17
+ self.dim = dim
18
+
19
+ def forward(self, x, scale=1000):
20
+ if x.ndim < 1:
21
+ x = x.unsqueeze(0)
22
+ device = x.device
23
+ half_dim = self.dim // 2
24
+ emb = math.log(10000) / (half_dim - 1)
25
+ emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
26
+ emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
27
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
28
+ return emb
29
+
30
+ class VitsWNDecoder(nn.Module):
31
+
32
+ def __init__(self,
33
+ in_channels,
34
+ out_channels,
35
+ hidden_channels,
36
+ kernel_size,
37
+ dilation_rate,
38
+ n_layers,
39
+ gin_channels=0,
40
+ pe_scale=1000
41
+ ):
42
+ super().__init__()
43
+ self.in_channels = in_channels
44
+ self.out_channels = out_channels
45
+ self.hidden_channels = hidden_channels
46
+ self.kernel_size = kernel_size
47
+ self.dilation_rate = dilation_rate
48
+ self.n_layers = n_layers
49
+ self.gin_channels = gin_channels
50
+ self.pe_scale = pe_scale
51
+ self.time_pos_emb = SinusoidalPosEmb(hidden_channels * 2)
52
+ dim = hidden_channels * 2
53
+ self.mlp = nn.Sequential(
54
+ nn.Linear(dim, dim * 4),
55
+ Mish(),
56
+ nn.Linear(dim * 4, dim)
57
+ )
58
+
59
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
60
+ self.enc = modules.WN(hidden_channels * 2,
61
+ kernel_size,
62
+ dilation_rate,
63
+ n_layers,
64
+ gin_channels=gin_channels)
65
+ self.proj = nn.Conv1d(hidden_channels * 2, out_channels, 1)
66
+
67
+ def forward(self, x, x_mask, mu, t, *args, **kwargs):
68
+ # x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)),
69
+ # 1).to(x.dtype)
70
+ t = self.time_pos_emb(t, scale=self.pe_scale)
71
+ t = self.mlp(t)
72
+
73
+ x = self.pre(x) * x_mask
74
+ mu = self.pre(mu)
75
+ x = torch.cat((x, mu), dim=1)
76
+ x = self.enc(x, x_mask, g=t)
77
+ stats = self.proj(x) * x_mask
78
+
79
+ return stats
pflow/models/components/wn_pflow_decoder.py ADDED
@@ -0,0 +1,117 @@
1
+ '''
2
+ https://github.com/cantabile-kwok/VoiceFlow-TTS/blob/main/model/diffsinger.py#L51
3
+ This is the original implementation of the DiffSinger model.
4
+ It is a slightly modified WaveNet-style (WN) decoder which can be used for initial tests.
5
+ It will be updated to the original p-flow implementation later.
6
+ '''
7
+ import math
8
+
9
+ import torch.nn as nn
10
+ import torch
11
+ from torch.nn import Conv1d, Linear
12
+ import math
13
+ import torch.nn.functional as F
14
+
15
+
16
+ class Mish(nn.Module):
17
+ def forward(self, x):
18
+ return x * torch.tanh(F.softplus(x))
19
+
20
+
21
+ class SinusoidalPosEmb(nn.Module):
22
+ def __init__(self, dim):
23
+ super(SinusoidalPosEmb, self).__init__()
24
+ self.dim = dim
25
+
26
+ def forward(self, x, scale=1000):
27
+ if x.ndim < 1:
28
+ x = x.unsqueeze(0)
29
+ device = x.device
30
+ half_dim = self.dim // 2
31
+ emb = math.log(10000) / (half_dim - 1)
32
+ emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
33
+ emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
34
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
35
+ return emb
36
+
37
+
38
+ class ResidualBlock(nn.Module):
39
+ def __init__(self, encoder_hidden, residual_channels, dilation):
40
+ super().__init__()
41
+ self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
42
+ self.diffusion_projection = Linear(residual_channels, residual_channels)
43
+ self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1)
44
+ self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)
45
+
46
+ def forward(self, x, conditioner, diffusion_step):
47
+ diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
48
+ conditioner = self.conditioner_projection(conditioner)
49
+ y = x + diffusion_step
50
+
51
+ y = self.dilated_conv(y) + conditioner
52
+
53
+ gate, filter = torch.chunk(y, 2, dim=1)
54
+ y = torch.sigmoid(gate) * torch.tanh(filter)
55
+
56
+ y = self.output_projection(y)
57
+ residual, skip = torch.chunk(y, 2, dim=1)
58
+ return (x + residual) / math.sqrt(2.0), skip
59
+
60
+ class DiffSingerNet(nn.Module):
61
+ def __init__(
62
+ self,
63
+ in_dims=80,
64
+ residual_channels=256,
65
+ encoder_hidden=80,
66
+ dilation_cycle_length=1,
67
+ residual_layers=20,
68
+ pe_scale=1000
69
+ ):
70
+ super().__init__()
71
+
72
+ self.pe_scale = pe_scale
73
+
74
+ self.input_projection = Conv1d(in_dims, residual_channels, 1)
75
+ self.time_pos_emb = SinusoidalPosEmb(residual_channels)
76
+ dim = residual_channels
77
+ self.mlp = nn.Sequential(
78
+ nn.Linear(dim, dim * 4),
79
+ Mish(),
80
+ nn.Linear(dim * 4, dim)
81
+ )
82
+ self.residual_layers = nn.ModuleList([
83
+ ResidualBlock(encoder_hidden, residual_channels, 2 ** (i % dilation_cycle_length))
84
+ for i in range(residual_layers)
85
+ ])
86
+ self.skip_projection = Conv1d(residual_channels, residual_channels, 1)
87
+ self.output_projection = Conv1d(residual_channels, in_dims, 1)
88
+ nn.init.zeros_(self.output_projection.weight)
89
+
90
+ def forward(self, spec, spec_mask, mu, t, *args, **kwargs):
91
+ """
92
+ :param spec: [B, M, T]
93
+ :param t: [B, ]
94
+ :param mu: [B, M, T]
95
+ :return:
96
+ """
97
+ # x = spec[:, 0]
98
+ x = spec
99
+ x = self.input_projection(x) # x [B, residual_channel, T]
100
+
101
+ x = F.relu(x)
102
+
103
+ t = self.time_pos_emb(t, scale=self.pe_scale)
104
+ t = self.mlp(t)
105
+
106
+ cond = mu
107
+
108
+ skip = []
109
+ for layer_id, layer in enumerate(self.residual_layers):
110
+ x, skip_connection = layer(x, cond, t)
111
+ skip.append(skip_connection)
112
+
113
+ x = torch.sum(torch.stack(skip), dim=0) / math.sqrt(len(self.residual_layers))
114
+ x = self.skip_projection(x)
115
+ x = F.relu(x)
116
+ x = self.output_projection(x) # [B, M, T]
117
+ return x * spec_mask
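
Note: a small forward-pass sketch for DiffSingerNet above used as the flow-matching estimator (batch size, lengths and the random inputs are illustrative assumptions):

    import torch
    from pflow.models.components.wn_pflow_decoder import DiffSingerNet

    est = DiffSingerNet(in_dims=80, residual_channels=256, encoder_hidden=80)
    x    = torch.randn(4, 80, 200)     # noisy mel at flow-matching time t
    mask = torch.ones(4, 1, 200)
    mu   = torch.randn(4, 80, 200)     # aligned encoder output used as the condition
    t    = torch.rand(4)               # one time value per utterance
    v    = est(x, mask, mu, t)         # predicted vector field, same shape as x
    print(v.shape)                     # torch.Size([4, 80, 200])
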
pflow/models/pflow_tts.py ADDED
@@ -0,0 +1,182 @@
1
+ import datetime as dt
2
+ import math
3
+ import random
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+
8
+
9
+ from pflow.models.baselightningmodule import BaseLightningClass
10
+ from pflow.models.components.flow_matching import CFM
11
+ from pflow.models.components.speech_prompt_encoder import TextEncoder
12
+ from pflow.utils.model import (
13
+ denormalize,
14
+ duration_loss,
15
+ fix_len_compatibility,
16
+ generate_path,
17
+ sequence_mask,
18
+ )
19
+ from pflow.models.components import commons
20
+ from pflow.models.components.aligner import Aligner, ForwardSumLoss, BinLoss
21
+
22
+
23
+
24
+ class pflowTTS(BaseLightningClass): #
25
+ def __init__(
26
+ self,
27
+ n_vocab,
28
+ n_feats,
29
+ encoder,
30
+ decoder,
31
+ cfm,
32
+ data_statistics,
33
+ prompt_size=264,
34
+ optimizer=None,
35
+ scheduler=None,
36
+ **kwargs,
37
+ ):
38
+ super().__init__()
39
+
40
+ self.save_hyperparameters(logger=False)
41
+
42
+ self.n_vocab = n_vocab
43
+ self.n_feats = n_feats
44
+ self.prompt_size = prompt_size
45
+ speech_in_channels = n_feats
46
+
47
+ self.encoder = TextEncoder(
48
+ encoder.encoder_type,
49
+ encoder.encoder_params,
50
+ encoder.duration_predictor_params,
51
+ n_vocab,
52
+ speech_in_channels,
53
+ )
54
+
55
+ # self.aligner = Aligner(
56
+ # dim_in=encoder.encoder_params.n_feats,
57
+ # dim_hidden=encoder.encoder_params.n_feats,
58
+ # attn_channels=encoder.encoder_params.n_feats,
59
+ # )
60
+
61
+ # self.aligner_loss = ForwardSumLoss()
62
+ # self.bin_loss = BinLoss()
63
+ # self.aligner_bin_loss_weight = 0.0
64
+
65
+ self.decoder = CFM(
66
+ in_channels=encoder.encoder_params.n_feats,
67
+ out_channel=encoder.encoder_params.n_feats,
68
+ cfm_params=cfm,
69
+ decoder_params=decoder,
70
+ )
71
+
72
+ self.proj_prompt = torch.nn.Conv1d(encoder.encoder_params.n_channels, self.n_feats, 1)
73
+
74
+ self.update_data_statistics(data_statistics)
75
+
76
+ @torch.inference_mode()
77
+ def synthesise(self, x, x_lengths, prompt, n_timesteps, temperature=1.0, length_scale=1.0, guidance_scale=0.0):
78
+
79
+ # For RTF computation
80
+ t = dt.datetime.now()
81
+ assert prompt is not None, "Prompt must be provided for synthesis"
82
+ # Get encoder_outputs `mu_x` and log-scaled token durations `logw`
83
+ mu_x, logw, x_mask = self.encoder(x, x_lengths, prompt)
84
+ w = torch.exp(logw) * x_mask
85
+ w_ceil = torch.ceil(w) * length_scale
86
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
87
+ y_max_length = y_lengths.max()
88
+ y_max_length_ = fix_len_compatibility(y_max_length)
89
+
90
+ # Using obtained durations `w` construct alignment map `attn`
91
+ y_mask = sequence_mask(y_lengths, y_max_length_).unsqueeze(1).to(x_mask.dtype)
92
+ attn_mask = x_mask.unsqueeze(-1) * y_mask.unsqueeze(2)
93
+ attn = generate_path(w_ceil.squeeze(1), attn_mask.squeeze(1)).unsqueeze(1)
94
+
95
+ # Align encoded text and get mu_y
96
+ mu_y = torch.matmul(attn.squeeze(1).transpose(1, 2), mu_x.transpose(1, 2))
97
+ mu_y = mu_y.transpose(1, 2)
98
+ encoder_outputs = mu_y[:, :, :y_max_length]
99
+
100
+ # Generate sample tracing the probability flow
101
+ decoder_outputs = self.decoder(mu_y, y_mask, n_timesteps, temperature, guidance_scale=guidance_scale)
102
+ decoder_outputs = decoder_outputs[:, :, :y_max_length]
103
+
104
+ t = (dt.datetime.now() - t).total_seconds()
105
+ rtf = t * 22050 / (decoder_outputs.shape[-1] * 256)
106
+
107
+ return {
108
+ "encoder_outputs": encoder_outputs,
109
+ "decoder_outputs": decoder_outputs,
110
+ "attn": attn[:, :, :y_max_length],
111
+ "mel": denormalize(decoder_outputs, self.mel_mean, self.mel_std),
112
+ "mel_lengths": y_lengths,
113
+ "rtf": rtf,
114
+ }
115
+
116
+ def forward(self, x, x_lengths, y, y_lengths, prompt=None, cond=None, **kwargs):
117
+ if prompt is None:
118
+ prompt_slice, ids_slice = commons.rand_slice_segments(
119
+ y, y_lengths, self.prompt_size
120
+ )
121
+ else:
122
+ prompt_slice = prompt
123
+ mu_x, logw, x_mask = self.encoder(x, x_lengths, prompt_slice)
124
+
125
+ y_max_length = y.shape[-1]
126
+
127
+ y_mask = sequence_mask(y_lengths, y_max_length).unsqueeze(1).to(x_mask)
128
+ attn_mask = x_mask.unsqueeze(-1) * y_mask.unsqueeze(2)
129
+
130
+ with torch.no_grad():
131
+ # negative cross-entropy
132
+ s_p_sq_r = torch.ones_like(mu_x) # [b, d, t]
133
+ # s_p_sq_r = torch.exp(-2 * logx)
134
+ neg_cent1 = torch.sum(
135
+ -0.5 * math.log(2 * math.pi)- torch.zeros_like(mu_x), [1], keepdim=True
136
+ )
137
+ # neg_cent1 = torch.sum(
138
+ # -0.5 * math.log(2 * math.pi) - logx, [1], keepdim=True
139
+ # ) # [b, 1, t_s]
140
+ neg_cent2 = torch.einsum("bdt, bds -> bts", -0.5 * (y**2), s_p_sq_r)
141
+ neg_cent3 = torch.einsum("bdt, bds -> bts", y, (mu_x * s_p_sq_r))
142
+ neg_cent4 = torch.sum(
143
+ -0.5 * (mu_x**2) * s_p_sq_r, [1], keepdim=True
144
+ )
145
+ neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
146
+
147
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
148
+ from pflow.utils.monotonic_align import maximum_path
149
+ attn = (
150
+ maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
151
+ )
152
+
153
+ logw_ = torch.log(1e-8 + attn.sum(2)) * x_mask
154
+ dur_loss = duration_loss(logw, logw_, x_lengths)
155
+
156
+ # aln_hard, aln_soft, aln_log, aln_mask = self.aligner(
157
+ # mu_x.transpose(1,2), x_mask, y, y_mask
158
+ # )
159
+ # attn = aln_mask.transpose(1,2).unsqueeze(1)
160
+ # align_loss = self.aligner_loss(aln_log, x_lengths, y_lengths)
161
+ # if self.aligner_bin_loss_weight > 0.:
162
+ # align_bin_loss = self.bin_loss(aln_mask, aln_log, x_lengths) * self.aligner_bin_loss_weight
163
+ # align_loss = align_loss + align_bin_loss
164
+ # dur_loss = F.l1_loss(logw, attn.sum(2))
165
+ # dur_loss = dur_loss + align_loss
166
+
167
+ # Align encoded text with mel-spectrogram and get mu_y segment
168
+ attn = attn.squeeze(1).transpose(1,2)
169
+ mu_y = torch.matmul(attn.squeeze(1).transpose(1, 2), mu_x.transpose(1, 2))
170
+ mu_y = mu_y.transpose(1, 2)
171
+
172
+ y_loss_mask = sequence_mask(y_lengths, y_max_length).unsqueeze(1).to(x_mask)
173
+ if prompt is None:
174
+ for i in range(y.size(0)):
175
+ y_loss_mask[i,:,ids_slice[i]:ids_slice[i] + self.prompt_size] = False
176
+ # Compute loss of the decoder
177
+ diff_loss, _ = self.decoder.compute_loss(x1=y.detach(), mask=y_mask, mu=mu_y, cond=cond, loss_mask=y_loss_mask)
178
+
179
+ prior_loss = torch.sum(0.5 * ((y - mu_y) ** 2 + math.log(2 * math.pi)) * y_loss_mask)
180
+ prior_loss = prior_loss / (torch.sum(y_loss_mask) * self.n_feats)
181
+
182
+ return dur_loss, prior_loss, diff_loss, attn
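
Note: an end-to-end inference sketch for pflowTTS.synthesise (illustrative only; the checkpoint name, the random prompt and the cleaner name are assumptions — in practice the prompt is a normalized ~3 s mel slice from a reference recording):

    import torch
    from pflow.models.pflow_tts import pflowTTS
    from pflow.text import text_to_sequence

    model = pflowTTS.load_from_checkpoint("checkpoint_epoch=499.ckpt", map_location="cpu")
    model.eval()

    ids = text_to_sequence("Привіт, світе!", ["ukr_cleaners"])
    x = torch.tensor(ids, dtype=torch.long)[None]
    x_lengths = torch.tensor([x.shape[-1]])
    prompt = torch.randn(1, model.n_feats, model.prompt_size)   # placeholder speech prompt

    out = model.synthesise(x, x_lengths, prompt, n_timesteps=10, length_scale=1.0)
    mel = out["mel"]   # (1, n_feats, T) denormalized mel, ready for the HiFi-GAN vocoder
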
pflow/text/__init__.py ADDED
@@ -0,0 +1,53 @@
1
+ """ from https://github.com/keithito/tacotron """
2
+ from pflow.text import cleaners
3
+ from pflow.text.symbols import symbols
4
+
5
+ # Mappings from symbol to numeric ID and vice versa:
6
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
7
+ _id_to_symbol = {i: s for i, s in enumerate(symbols)} # pylint: disable=unnecessary-comprehension
8
+
9
+
10
+ def text_to_sequence(text, cleaner_names):
11
+ """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
12
+ Args:
13
+ text: string to convert to a sequence
14
+ cleaner_names: names of the cleaner functions to run the text through
15
+ Returns:
16
+ List of integers corresponding to the symbols in the text
17
+ """
18
+ sequence = []
19
+
20
+ clean_text = _clean_text(text, cleaner_names)
21
+ for symbol in clean_text:
22
+ symbol_id = _symbol_to_id[symbol]
23
+ sequence += [symbol_id]
24
+ return sequence
25
+
26
+
27
+ def cleaned_text_to_sequence(cleaned_text):
28
+ """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
29
+ Args:
30
+ cleaned_text: already-cleaned string to convert to a sequence
31
+ Returns:
32
+ List of integers corresponding to the symbols in the text
33
+ """
34
+ sequence = [_symbol_to_id[symbol] for symbol in cleaned_text]
35
+ return sequence
36
+
37
+
38
+ def sequence_to_text(sequence):
39
+ """Converts a sequence of IDs back to a string"""
40
+ result = ""
41
+ for symbol_id in sequence:
42
+ s = _id_to_symbol[symbol_id]
43
+ result += s
44
+ return result
45
+
46
+
47
+ def _clean_text(text, cleaner_names):
48
+ for name in cleaner_names:
49
+ cleaner = getattr(cleaners, name)
50
+ if not cleaner:
51
+ raise Exception("Unknown cleaner: %s" % name)
52
+ text = cleaner(text)
53
+ return text
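
Note: a tiny round-trip sketch for the symbol mapping helpers above (the input string is an arbitrary example; every character must already be in pflow.text.symbols):

    from pflow.text import cleaned_text_to_sequence, sequence_to_text

    ids = cleaned_text_to_sequence("pryvit")
    assert sequence_to_text(ids) == "pryvit"
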
pflow/text/cleaners.py ADDED
@@ -0,0 +1,19 @@
1
+ from pflow.text.textnormalizer import norm
2
+ from ukrainian_word_stress import Stressifier
3
+ import regex
4
+ import re
5
+ from ipa_uk import ipa
6
+ stressify = Stressifier()
7
+
8
+
9
+ _whitespace_re = re.compile(r"\s+")
10
+ def collapse_whitespace(text):
11
+ return re.sub(_whitespace_re, " ", text)
12
+
13
+
14
+ def ukr_cleaners(text):
15
+ text = collapse_whitespace(text)
16
+ text = norm(text).lower()
17
+
18
+ text = regex.sub(r'[^\p{L}\p{N}\?\!\,\.\-\: ]', '', text)
19
+ return ipa(stressify(text), False)
pflow/text/numbers.py ADDED
@@ -0,0 +1,71 @@
1
+ """ from https://github.com/keithito/tacotron """
2
+
3
+ import re
4
+
5
+ import inflect
6
+
7
+ _inflect = inflect.engine()
8
+ _comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
9
+ _decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)")
10
+ _pounds_re = re.compile(r"£([0-9\,]*[0-9]+)")
11
+ _dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)")
12
+ _ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)")
13
+ _number_re = re.compile(r"[0-9]+")
14
+
15
+
16
+ def _remove_commas(m):
17
+ return m.group(1).replace(",", "")
18
+
19
+
20
+ def _expand_decimal_point(m):
21
+ return m.group(1).replace(".", " point ")
22
+
23
+
24
+ def _expand_dollars(m):
25
+ match = m.group(1)
26
+ parts = match.split(".")
27
+ if len(parts) > 2:
28
+ return match + " dollars"
29
+ dollars = int(parts[0]) if parts[0] else 0
30
+ cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
31
+ if dollars and cents:
32
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
33
+ cent_unit = "cent" if cents == 1 else "cents"
34
+ return f"{dollars} {dollar_unit}, {cents} {cent_unit}"
35
+ elif dollars:
36
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
37
+ return f"{dollars} {dollar_unit}"
38
+ elif cents:
39
+ cent_unit = "cent" if cents == 1 else "cents"
40
+ return f"{cents} {cent_unit}"
41
+ else:
42
+ return "zero dollars"
43
+
44
+
45
+ def _expand_ordinal(m):
46
+ return _inflect.number_to_words(m.group(0))
47
+
48
+
49
+ def _expand_number(m):
50
+ num = int(m.group(0))
51
+ if num > 1000 and num < 3000:
52
+ if num == 2000:
53
+ return "two thousand"
54
+ elif num > 2000 and num < 2010:
55
+ return "two thousand " + _inflect.number_to_words(num % 100)
56
+ elif num % 100 == 0:
57
+ return _inflect.number_to_words(num // 100) + " hundred"
58
+ else:
59
+ return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ")
60
+ else:
61
+ return _inflect.number_to_words(num, andword="")
62
+
63
+
64
+ def normalize_numbers(text):
65
+ text = re.sub(_comma_number_re, _remove_commas, text)
66
+ text = re.sub(_pounds_re, r"\1 pounds", text)
67
+ text = re.sub(_dollars_re, _expand_dollars, text)
68
+ text = re.sub(_decimal_number_re, _expand_decimal_point, text)
69
+ text = re.sub(_ordinal_re, _expand_ordinal, text)
70
+ text = re.sub(_number_re, _expand_number, text)
71
+ return text
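
Note: this module is the stock English number normalizer from keithito/tacotron and is not called by the Ukrainian cleaner above; a quick sanity-check sketch (example sentence is arbitrary):

    from pflow.text.numbers import normalize_numbers

    print(normalize_numbers("I paid $15 for 2 books in 2003."))
    # expected: "I paid fifteen dollars for two books in two thousand three."
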
pflow/text/symbols.py ADDED
@@ -0,0 +1,17 @@
1
+ """ from https://github.com/keithito/tacotron
2
+
3
+ Defines the set of symbols used in text input to the model.
4
+ """
5
+ _pad = "_"
6
+ _punctuation = '-´;:,.!?¡¿—…"«»“” '
7
+ _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
8
+ _letters_ipa = (
9
+ "éýíó'̯'͡ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
10
+ )
11
+
12
+
13
+ # Export all symbols:
14
+ symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
15
+
16
+ # Special symbol ids
17
+ SPACE_ID = symbols.index(" ")
pflow/text/textnormalizer.py ADDED
@@ -0,0 +1,198 @@
1
+ import regex
2
+ from num2words import num2words
3
+ import unicodedata
4
+
5
+ simple_replacements = {
6
+ '№' : 'номер',
7
+ '§': 'номер'
8
+ }
9
+
10
+ masc_replacments_dict = {
11
+ '%':['відсоток', 'відсотки', 'відсотків'],
12
+ 'мм': ['міліметр', 'міліметри', 'міліметрів'],
13
+ 'см': ['сантиметр', 'сантиметри', 'сантиметрів'],
14
+ 'мм': ['міліметр', 'міліметри', 'міліметрів'],
15
+ # 'м': ['метр', 'метри', 'метрів'],
16
+ 'км': ['кілометр', 'кілометри', 'кілометрів'],
17
+ 'гц': ['герц', 'герци', 'герців'],
18
+ 'кгц': ['кілогерц', 'кілогерци', 'кілогерців'],
19
+ 'мгц': ['мегагерц', 'мегагерци', 'мегагерців'],
20
+ 'ггц': ['гігагерц', 'гігагерци', 'гігагерців'],
21
+ 'вт': ['ват', 'вати', 'ватів'],
22
+ 'квт': ['кіловат', 'кіловати', 'кіловатів'],
23
+ 'мвт': ['мегават', 'мегавати', 'мегаватів'],
24
+ 'гвт': ['гігават', 'гігавати', 'гігаватів'],
25
+ 'дж': ['джоуль', 'джоулі', 'джоулів'],
26
+ 'кдж': ['кілоджоуль', 'кілоджоулі', 'кілоджоулів'],
27
+ 'мдж': ['мегаджоуль', 'мегаджоулі', 'мегаджоулів'],
28
+ 'см2': ['сантиметр квадратний', 'сантиметри квадратні', 'сантиметрів квадратних'],
29
+ 'м2': ['метр квадратний', 'метри квадратні', 'метрів квадратних'],
30
+ 'км2': ['кілометр квадратний', 'кілометри квадратні', 'кілометрів квадратних'],
31
+ '$': ['долар', 'долари', 'доларів'],
32
+ '€': ['євро', 'євро', 'євро'],
33
+ }
34
+
35
+ fem_replacments_dict = {
36
+ 'кал': ['калорія', 'калорії', 'калорій'],
37
+ 'ккал': ['кілокалорія', 'кілокалорії', 'кілокалорій'],
38
+ 'грн': ['гривня', 'гривні', 'гривень'],
39
+ 'грв': ['гривня', 'гривні', 'гривень'],
40
+ '₴': ['гривня', 'гривні', 'гривень'],
41
+ }
42
+
43
+ neu_replacments_dict = {
44
+ '€': ['євро', 'євро', 'євро'],
45
+ }
46
+
47
+ all_replacments_keys = list(masc_replacments_dict.keys()) + list(fem_replacments_dict.keys()) + list(neu_replacments_dict.keys())
48
+
49
+ #Ordinal types
50
+ #Називний
51
+ ordinal_nominative_masculine_cases = ('й','ий')
52
+ ordinal_nominative_feminine_cases = ('a','ша', 'я')
53
+ ordinal_nominative_neuter_cases = ('е',)
54
+
55
+ #Родовий
56
+ ordinal_genitive_masculine_case = ('го','о',)
57
+ ordinal_genitive_feminine_case = ('ї', 'ої')
58
+
59
+
60
+ #Давальний
61
+ ordinal_dative_masculine_case = ('му',)
62
+ ordinal_dative_feminine_case = ('й','ій')
63
+
64
+ #Знахідний
65
+ ordinal_accusative_masculine_case = ordinal_genitive_masculine_case
66
+ ordinal_accusative_feminine_case = ('у',)
67
+
68
+ #Орудний
69
+ ordinal_instrumental_masculine_case = ('им', 'ім')
70
+ ordinal_instrumental_feminine_case = ('ю')
71
+
72
+
73
+ #Місцевий
74
+ # ordinal_locative_masculine_case = ordinal_dative_masculine_case
75
+ # ordinal_locative_feminine_case = ordinal_dative_feminine_case
76
+
77
+ numcases_r = regex.compile(rf'((?:^|\s)(\d+)\s*(\-?)(([^\d,]*?)|(\-\.+))(?:\.|,|:|-)?)(\s+[^,.:\-]|$)', regex.IGNORECASE, regex.UNICODE)
78
+
79
+ print(numcases_r)
80
+ cardinal_genitive_endings = ('а', 'e', 'є', 'й')
81
+ ordinal_genitive_cases = ('року',)
82
+
83
+ def number_form(number):
84
+ if number[-1] == "1":
85
+ return 0
86
+ elif number[-1] in ("2", "3", "4"):
87
+ return 1
88
+ else:
89
+ return 2
90
+
91
+ def replace_cases(number, dash, case='', next_word=''):
92
+ print(f'{number}, {dash}, {case}, {next_word}')
93
+ gender = 'masculine'
94
+ m_case = 'nominative'
95
+ to = 'ordinal'
96
+ repl = ''
97
+ if not dash:
98
+ if case in all_replacments_keys:
99
+ if case in masc_replacments_dict.keys():
100
+ repl = masc_replacments_dict.get(case)[number_form(number)]
101
+ gender = 'masculine'
102
+ elif case in fem_replacments_dict.keys():
103
+ repl = fem_replacments_dict.get(case)[number_form(number)]
104
+ gender = 'feminine'
105
+ elif case in neu_replacments_dict.keys():
106
+ repl = neu_replacments_dict.get(case)[number_form(number)]
107
+ gender = 'neuter'
108
+ to = 'cardinal'
109
+ else:
110
+ if len(case) < 3 and case and case[-1] in cardinal_genitive_endings:
111
+ m_case = 'genitive'
112
+ gender='masculine'
113
+ to = 'cardinal'
114
+ elif case in ordinal_genitive_cases:
115
+ to = 'ordinal'
116
+ m_case = 'genitive'
117
+ repl = case
118
+ else:
119
+ to = 'cardinal'
120
+ repl = case
121
+
122
+ else:
123
+ if case in ordinal_nominative_masculine_cases:
124
+ m_case = 'nominative'
125
+ gender = 'masculine'
126
+ elif case in ordinal_nominative_feminine_cases:
127
+ m_case = 'nominative'
128
+ gender = 'feminine'
129
+ elif case in ordinal_nominative_neuter_cases:
130
+ m_case = 'nominative'
131
+ gender = 'neuter'
132
+ elif case in ordinal_genitive_masculine_case:
133
+ m_case = 'genitive'
134
+ gender = 'masculine'
135
+ elif case in ordinal_genitive_feminine_case:
136
+ m_case = 'genitive'
137
+ gender = 'feminine'
138
+ elif case in ordinal_dative_masculine_case:
139
+ m_case = 'dative'
140
+ gender = 'masculine'
141
+ elif case in ordinal_dative_feminine_case:
142
+ m_case = 'dative'
143
+ gender = 'feminine'
144
+ elif case in ordinal_accusative_feminine_case:
145
+ m_case = 'accusative'
146
+ gender = 'feminine'
147
+ elif case in ordinal_instrumental_masculine_case:
148
+ m_case = 'instrumental'
149
+ gender = 'masculine'
150
+ elif case in ordinal_instrumental_feminine_case:
151
+ m_case = 'instrumental'
152
+ gender = 'feminine'
153
+ else:
154
+ if case and case[-1] in cardinal_genitive_endings:
155
+ m_case = 'genitive'
156
+ gender='masculine'
157
+ to = 'cardinal'
158
+ repl = case
159
+ else:
160
+ print(f'UNKNOWN CASE {number}-{case}')
161
+
162
+ return_str = num2words(number, to=to, lang='uk', case=m_case, gender=gender)
163
+ if repl:
164
+ return_str += ' ' + repl
165
+ if not next_word or (next_word and next_word.strip().isupper()):
166
+ return_str += '.'
167
+ return return_str
168
+
169
+ def norm(text):
170
+ text = regex.sub(r'[\t\n]', ' ', text)
171
+ text = regex.sub(rf"[{''.join(simple_replacements.keys())}]", lambda x: f' {simple_replacements[x.group()]} ', text)
172
+ text = regex.sub(r"(\d)\s+(\d)", r"\1\2", text)
173
+ text = regex.sub(r'\s+', ' ', text)
174
+ text = unicodedata.normalize('NFC', text)
175
+ matches = numcases_r.finditer(text)
176
+ pos = 0
177
+ new_text = ''
178
+ for m in matches:
179
+ repl = replace_cases(m.group(2), m.group(3), m.group(4), m.group(7))
180
+ new_text += text[pos:m.start(0)]+ ' ' + repl
181
+ pos = m.end(1)
182
+ new_text += text[pos:]
183
+ return new_text.strip()
184
+
185
+
186
+
187
+ #1-го квітня, на 1-му поверсі Яринка загубила 2грн але знайшла 5€. Але її 4-річна сестричка забрала 50% її знахідки.
188
+ #Також 2003 року щось там сталося і 40-річний чоловік помер. Його знайшли через 3 години.
189
+
190
+ #01:51:37.250 -> 01:51:44.650: Серед міленіалів цей показник становить 39%, серед покоління X – 30%,
191
+ #39
192
+ #30
193
+ #MATCHED: серед міленіалів цей показник становить тридцять девять , серед покоління Х - тридцять ,
194
+ #Skipped because contains inapropirate characters
195
+
196
+ #05:28:52.350 -> 05:29:00.000: 2016 рік завершився з чистими збитками 1,2 мільярди доларів США.
197
+ #2016
198
+ #MATCHED: дві тисячі шістнадцять рік завершився з чистими збитками 1,2 млрд доларів США.
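
Note: a minimal invocation sketch for norm() above, reusing the test sentence from the comments at the bottom of the module (the exact expansion depends on num2words and the case heuristics, so no literal output is claimed):

    from pflow.text.textnormalizer import norm

    print(norm("1-го квітня, на 1-му поверсі Яринка загубила 2грн але знайшла 5€."))
    # digit groups such as "1-го", "2грн" and "5€" are expanded into Ukrainian number words
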
pflow/utils/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from pflow.utils.instantiators import instantiate_callbacks, instantiate_loggers
2
+ from pflow.utils.logging_utils import log_hyperparameters
3
+ from pflow.utils.pylogger import get_pylogger
4
+ from pflow.utils.rich_utils import enforce_tags, print_config_tree
5
+ from pflow.utils.utils import extras, get_metric_value, task_wrapper
pflow/utils/audio.py ADDED
@@ -0,0 +1,82 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.utils.data
4
+ from librosa.filters import mel as librosa_mel_fn
5
+ from scipy.io.wavfile import read
6
+
7
+ MAX_WAV_VALUE = 32768.0
8
+
9
+
10
+ def load_wav(full_path):
11
+ sampling_rate, data = read(full_path)
12
+ return data, sampling_rate
13
+
14
+
15
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
16
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
17
+
18
+
19
+ def dynamic_range_decompression(x, C=1):
20
+ return np.exp(x) / C
21
+
22
+
23
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
24
+ return torch.log(torch.clamp(x, min=clip_val) * C)
25
+
26
+
27
+ def dynamic_range_decompression_torch(x, C=1):
28
+ return torch.exp(x) / C
29
+
30
+
31
+ def spectral_normalize_torch(magnitudes):
32
+ output = dynamic_range_compression_torch(magnitudes)
33
+ return output
34
+
35
+
36
+ def spectral_de_normalize_torch(magnitudes):
37
+ output = dynamic_range_decompression_torch(magnitudes)
38
+ return output
39
+
40
+
41
+ mel_basis = {}
42
+ hann_window = {}
43
+
44
+
45
+ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
46
+ if torch.min(y) < -1.0:
47
+ print("min value is ", torch.min(y))
48
+ if torch.max(y) > 1.0:
49
+ print("max value is ", torch.max(y))
50
+
51
+ global mel_basis, hann_window # pylint: disable=global-statement
52
+ if f"{str(fmax)}_{str(y.device)}" not in mel_basis:
53
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
54
+ mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
55
+ hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
56
+
57
+ y = torch.nn.functional.pad(
58
+ y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
59
+ )
60
+ y = y.squeeze(1)
61
+
62
+ spec = torch.view_as_real(
63
+ torch.stft(
64
+ y,
65
+ n_fft,
66
+ hop_length=hop_size,
67
+ win_length=win_size,
68
+ window=hann_window[str(y.device)],
69
+ center=center,
70
+ pad_mode="reflect",
71
+ normalized=False,
72
+ onesided=True,
73
+ return_complex=True,
74
+ )
75
+ )
76
+
77
+ spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
78
+
79
+ spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec)
80
+ spec = spectral_normalize_torch(spec)
81
+
82
+ return spec
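
Note: a quick sketch of calling mel_spectrogram above with the usual 22.05 kHz HiFi-GAN settings (the parameter values are assumptions; the authoritative values live in the data and vocoder configs):

    import torch
    from pflow.utils.audio import mel_spectrogram

    wav = torch.randn(1, 22050)   # one second of audio, already scaled to [-1, 1]
    mel = mel_spectrogram(wav, n_fft=1024, num_mels=80, sampling_rate=22050,
                          hop_size=256, win_size=1024, fmin=0, fmax=8000, center=False)
    print(mel.shape)              # (1, 80, n_frames), log-compressed mel spectrogram
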
pflow/utils/generate_data_statistics.py ADDED
@@ -0,0 +1,115 @@
1
+ r"""
2
+ The file computes the dataset statistics (mel mean and standard deviation) and stores them in a JSON file so the
3
+ model can load them when needed.
4
+
5
+ Parameters are read from the Hydra data config under configs/data.
6
+ """
7
+ import os
8
+
9
+ import sys
10
+ sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
11
+
12
+ import argparse
13
+ import json
14
+ import sys
15
+ from pathlib import Path
16
+
17
+ import rootutils
18
+ import torch
19
+ from hydra import compose, initialize
20
+ from omegaconf import open_dict
21
+ from tqdm.auto import tqdm
22
+
23
+ from pflow.data.text_mel_datamodule import TextMelDataModule
24
+ from pflow.utils.logging_utils import pylogger
25
+
26
+ log = pylogger.get_pylogger(__name__)
27
+
28
+
29
+ def compute_data_statistics(data_loader: torch.utils.data.DataLoader, out_channels: int):
30
+ """Generate data mean and standard deviation helpful in data normalisation
31
+
32
+ Args:
33
+ data_loader (torch.utils.data.DataLoader): data loader yielding batches with `y` mel spectrograms and `y_lengths`
34
+ out_channels (int): mel spectrogram channels
35
+ """
36
+ total_mel_sum = 0
37
+ total_mel_sq_sum = 0
38
+ total_mel_len = 0
39
+
40
+ for batch in tqdm(data_loader, leave=False):
41
+ mels = batch["y"]
42
+ mel_lengths = batch["y_lengths"]
43
+
44
+ total_mel_len += torch.sum(mel_lengths)
45
+ total_mel_sum += torch.sum(mels)
46
+ total_mel_sq_sum += torch.sum(torch.pow(mels, 2))
47
+
48
+ data_mean = total_mel_sum / (total_mel_len * out_channels)
49
+ data_std = torch.sqrt((total_mel_sq_sum / (total_mel_len * out_channels)) - torch.pow(data_mean, 2))
50
+
51
+ return {"mel_mean": data_mean.item(), "mel_std": data_std.item()}
52
+
53
+
54
+ def main():
55
+ parser = argparse.ArgumentParser()
56
+
57
+ parser.add_argument(
58
+ "-i",
59
+ "--input-config",
60
+ type=str,
61
+ default="vctk.yaml",
62
+ help="The name of the yaml config file under configs/data",
63
+ )
64
+
65
+ parser.add_argument(
66
+ "-b",
67
+ "--batch-size",
68
+ type=int,
69
+ default="256",
70
+ help="Can have increased batch size for faster computation",
71
+ )
72
+
73
+ parser.add_argument(
74
+ "-f",
75
+ "--force",
76
+ action="store_true",
77
+ default=False,
78
+ required=False,
79
+ help="force overwrite the file",
80
+ )
81
+ args = parser.parse_args()
82
+ output_file = Path(args.input_config).with_suffix(".json")
83
+
84
+ if os.path.exists(output_file) and not args.force:
85
+ print("File already exists. Use -f to force overwrite")
86
+ sys.exit(1)
87
+
88
+ with initialize(version_base="1.3", config_path="../../configs/data"):
89
+ cfg = compose(config_name=args.input_config, return_hydra_config=True, overrides=[])
90
+
91
+ root_path = rootutils.find_root(search_from=__file__, indicator=".project-root")
92
+
93
+ with open_dict(cfg):
94
+ del cfg["hydra"]
95
+ del cfg["_target_"]
96
+ cfg["data_statistics"] = None
97
+ cfg["seed"] = 1234
98
+ cfg["batch_size"] = args.batch_size
99
+ cfg["train_filelist_path"] = str(os.path.join(root_path, cfg["train_filelist_path"]))
100
+ cfg["valid_filelist_path"] = str(os.path.join(root_path, cfg["valid_filelist_path"]))
101
+
102
+ text_mel_datamodule = TextMelDataModule(**cfg)
103
+ text_mel_datamodule.setup()
104
+ data_loader = text_mel_datamodule.train_dataloader()
105
+ log.info("Dataloader loaded! Now computing stats...")
106
+ params = compute_data_statistics(data_loader, cfg["n_feats"])
107
+ print(params)
108
+ json.dump(
109
+ params,
110
+ open(output_file, "w"),
111
+ )
112
+
113
+
114
+ if __name__ == "__main__":
115
+ main()
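
Note: the script above is meant to be run once per dataset config before training, e.g. (the config name is an assumption — use whichever yaml exists under configs/data):

    python pflow/utils/generate_data_statistics.py -i vctk.yaml -b 256 -f
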
pflow/utils/instantiators.py ADDED
@@ -0,0 +1,56 @@
1
+ from typing import List
2
+
3
+ import hydra
4
+ from lightning import Callback
5
+ from lightning.pytorch.loggers import Logger
6
+ from omegaconf import DictConfig
7
+
8
+ from pflow.utils import pylogger
9
+
10
+ log = pylogger.get_pylogger(__name__)
11
+
12
+
13
+ def instantiate_callbacks(callbacks_cfg: DictConfig) -> List[Callback]:
14
+ """Instantiates callbacks from config.
15
+
16
+ :param callbacks_cfg: A DictConfig object containing callback configurations.
17
+ :return: A list of instantiated callbacks.
18
+ """
19
+ callbacks: List[Callback] = []
20
+
21
+ if not callbacks_cfg:
22
+ log.warning("No callback configs found! Skipping..")
23
+ return callbacks
24
+
25
+ if not isinstance(callbacks_cfg, DictConfig):
26
+ raise TypeError("Callbacks config must be a DictConfig!")
27
+
28
+ for _, cb_conf in callbacks_cfg.items():
29
+ if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf:
30
+ log.info(f"Instantiating callback <{cb_conf._target_}>") # pylint: disable=protected-access
31
+ callbacks.append(hydra.utils.instantiate(cb_conf))
32
+
33
+ return callbacks
34
+
35
+
36
+ def instantiate_loggers(logger_cfg: DictConfig) -> List[Logger]:
37
+ """Instantiates loggers from config.
38
+
39
+ :param logger_cfg: A DictConfig object containing logger configurations.
40
+ :return: A list of instantiated loggers.
41
+ """
42
+ logger: List[Logger] = []
43
+
44
+ if not logger_cfg:
45
+ log.warning("No logger configs found! Skipping...")
46
+ return logger
47
+
48
+ if not isinstance(logger_cfg, DictConfig):
49
+ raise TypeError("Logger config must be a DictConfig!")
50
+
51
+ for _, lg_conf in logger_cfg.items():
52
+ if isinstance(lg_conf, DictConfig) and "_target_" in lg_conf:
53
+ log.info(f"Instantiating logger <{lg_conf._target_}>") # pylint: disable=protected-access
54
+ logger.append(hydra.utils.instantiate(lg_conf))
55
+
56
+ return logger
pflow/utils/logging_utils.py ADDED
@@ -0,0 +1,53 @@
1
+ from typing import Any, Dict
2
+
3
+ from lightning.pytorch.utilities import rank_zero_only
4
+ from omegaconf import OmegaConf
5
+
6
+ from pflow.utils import pylogger
7
+
8
+ log = pylogger.get_pylogger(__name__)
9
+
10
+
11
+ @rank_zero_only
12
+ def log_hyperparameters(object_dict: Dict[str, Any]) -> None:
13
+ """Controls which config parts are saved by Lightning loggers.
14
+
15
+ Additionally saves:
16
+ - Number of model parameters
17
+
18
+ :param object_dict: A dictionary containing the following objects:
19
+ - `"cfg"`: A DictConfig object containing the main config.
20
+ - `"model"`: The Lightning model.
21
+ - `"trainer"`: The Lightning trainer.
22
+ """
23
+ hparams = {}
24
+
25
+ cfg = OmegaConf.to_container(object_dict["cfg"])
26
+ model = object_dict["model"]
27
+ trainer = object_dict["trainer"]
28
+
29
+ if not trainer.logger:
30
+ log.warning("Logger not found! Skipping hyperparameter logging...")
31
+ return
32
+
33
+ hparams["model"] = cfg["model"]
34
+
35
+ # save number of model parameters
36
+ hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
37
+ hparams["model/params/trainable"] = sum(p.numel() for p in model.parameters() if p.requires_grad)
38
+ hparams["model/params/non_trainable"] = sum(p.numel() for p in model.parameters() if not p.requires_grad)
39
+
40
+ hparams["data"] = cfg["data"]
41
+ hparams["trainer"] = cfg["trainer"]
42
+
43
+ hparams["callbacks"] = cfg.get("callbacks")
44
+ hparams["extras"] = cfg.get("extras")
45
+
46
+ hparams["task_name"] = cfg.get("task_name")
47
+ hparams["tags"] = cfg.get("tags")
48
+ hparams["ckpt_path"] = cfg.get("ckpt_path")
49
+ hparams["seed"] = cfg.get("seed")
50
+
51
+ # send hparams to all loggers
52
+ for logger in trainer.loggers:
53
+ logger.log_hyperparams(hparams)
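The three parameter counts are plain sums over model.parameters(); a tiny self-contained check of the same expressions (the Linear layer is only an illustration):

import torch

layer = torch.nn.Linear(80, 256)
total = sum(p.numel() for p in layer.parameters())                         # 80*256 + 256 = 20736
trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)
non_trainable = sum(p.numel() for p in layer.parameters() if not p.requires_grad)
assert total == 20736 and trainable == 20736 and non_trainable == 0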
pflow/utils/model.py ADDED
@@ -0,0 +1,90 @@
1
+ """ from https://github.com/jaywalnut310/glow-tts """
2
+
3
+ import numpy as np
4
+ import torch
5
+
6
+
7
+ def sequence_mask(length, max_length=None):
8
+ if max_length is None:
9
+ max_length = length.max()
10
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
11
+ return x.unsqueeze(0) < length.unsqueeze(1)
12
+
13
+
14
+ def fix_len_compatibility(length, num_downsamplings_in_unet=2):
15
+ factor = torch.scalar_tensor(2).pow(num_downsamplings_in_unet)
16
+ length = (length / factor).ceil() * factor
17
+ if not torch.onnx.is_in_onnx_export():
18
+ return length.int().item()
19
+ else:
20
+ return length
21
+
22
+
23
+ def convert_pad_shape(pad_shape):
24
+ inverted_shape = pad_shape[::-1]
25
+ pad_shape = [item for sublist in inverted_shape for item in sublist]
26
+ return pad_shape
27
+
28
+
29
+ def generate_path(duration, mask):
30
+ device = duration.device
31
+
32
+ b, t_x, t_y = mask.shape
33
+ cum_duration = torch.cumsum(duration, 1)
34
+ path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device) # placeholder; overwritten by sequence_mask below
35
+
36
+ cum_duration_flat = cum_duration.view(b * t_x)
37
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
38
+ path = path.view(b, t_x, t_y)
39
+ path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
40
+ path = path * mask
41
+ return path
42
+
43
+
44
+ def duration_loss(logw, logw_, lengths):
45
+ loss = torch.sum((logw - logw_) ** 2) / torch.sum(lengths)
46
+ return loss
47
+
48
+
49
+ def normalize(data, mu, std):
50
+ if not isinstance(mu, (float, int)):
51
+ if isinstance(mu, list):
52
+ mu = torch.tensor(mu, dtype=data.dtype, device=data.device)
53
+ elif isinstance(mu, torch.Tensor):
54
+ mu = mu.to(data.device)
55
+ elif isinstance(mu, np.ndarray):
56
+ mu = torch.from_numpy(mu).to(data.device)
57
+ mu = mu.unsqueeze(-1)
58
+
59
+ if not isinstance(std, (float, int)):
60
+ if isinstance(std, list):
61
+ std = torch.tensor(std, dtype=data.dtype, device=data.device)
62
+ elif isinstance(std, torch.Tensor):
63
+ std = std.to(data.device)
64
+ elif isinstance(std, np.ndarray):
65
+ std = torch.from_numpy(std).to(data.device)
66
+ std = std.unsqueeze(-1)
67
+
68
+ return (data - mu) / std
69
+
70
+
71
+ def denormalize(data, mu, std):
72
+ if not isinstance(mu, (float, int)): # match normalize(), so an int stat doesn't hit .unsqueeze below
73
+ if isinstance(mu, list):
74
+ mu = torch.tensor(mu, dtype=data.dtype, device=data.device)
75
+ elif isinstance(mu, torch.Tensor):
76
+ mu = mu.to(data.device)
77
+ elif isinstance(mu, np.ndarray):
78
+ mu = torch.from_numpy(mu).to(data.device)
79
+ mu = mu.unsqueeze(-1)
80
+
81
+ if not isinstance(std, (float, int)): # match normalize()
82
+ if isinstance(std, list):
83
+ std = torch.tensor(std, dtype=data.dtype, device=data.device)
84
+ elif isinstance(std, torch.Tensor):
85
+ std = std.to(data.device)
86
+ elif isinstance(std, np.ndarray):
87
+ std = torch.from_numpy(std).to(data.device)
88
+ std = std.unsqueeze(-1)
89
+
90
+ return data * std + mu
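A minimal sketch of the two mask helpers above on toy tensors, to make the expected shapes concrete (the values are chosen only for illustration):

import torch
from pflow.utils.model import generate_path, sequence_mask

lengths = torch.tensor([3, 5])
print(sequence_mask(lengths))             # [2, 5] boolean mask, True on valid positions

duration = torch.tensor([[2.0, 1.0, 3.0]])   # 3 tokens, 6 frames in total
mask = torch.ones(1, 3, 6)                   # [b, t_x, t_y]
path = generate_path(duration, mask)
print(path[0])   # row x is 1 exactly on the frames assigned to token x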
pflow/utils/monotonic_align/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ import numpy as np
2
+ import torch
3
+ from pflow.utils.monotonic_align.core import maximum_path_c
4
+
5
+
6
+ def maximum_path(neg_cent, mask):
7
+ """Cython optimized version.
8
+ neg_cent: [b, t_t, t_s]
9
+ mask: [b, t_t, t_s]
10
+ """
11
+ device = neg_cent.device
12
+ dtype = neg_cent.dtype
13
+ neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
14
+ path = np.zeros(neg_cent.shape, dtype=np.int32)
15
+
16
+ t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
17
+ t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
18
+ maximum_path_c(path, neg_cent, t_t_max, t_s_max)
19
+ return torch.from_numpy(path).to(device=device, dtype=dtype)
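A minimal usage sketch, assuming the Cython extension below has been built and that, as in Glow-TTS/VITS, the first time axis (t_t) is the longer one (e.g. mel frames) and the second (t_s) the shorter one (e.g. text tokens):

import torch
from pflow.utils.monotonic_align import maximum_path

b, t_t, t_s = 1, 9, 4
neg_cent = torch.randn(b, t_t, t_s)   # per-position alignment scores
mask = torch.ones(b, t_t, t_s)        # all positions valid
path = maximum_path(neg_cent, mask)   # hard monotonic 0/1 alignment, same shape
print(path.sum(2))                    # each t_t step is assigned exactly one t_s index
print(path.sum(1))                    # summed over t_t: duration per t_s index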
pflow/utils/monotonic_align/core.pyx ADDED
@@ -0,0 +1,42 @@
1
+ cimport cython
2
+ from cython.parallel import prange
3
+
4
+
5
+ @cython.boundscheck(False)
6
+ @cython.wraparound(False)
7
+ cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil:
8
+ cdef int x
9
+ cdef int y
10
+ cdef float v_prev
11
+ cdef float v_cur
12
+ cdef float tmp
13
+ cdef int index = t_x - 1
14
+
15
+ for y in range(t_y):
16
+ for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
17
+ if x == y:
18
+ v_cur = max_neg_val
19
+ else:
20
+ v_cur = value[y-1, x]
21
+ if x == 0:
22
+ if y == 0:
23
+ v_prev = 0.
24
+ else:
25
+ v_prev = max_neg_val
26
+ else:
27
+ v_prev = value[y-1, x-1]
28
+ value[y, x] += max(v_prev, v_cur)
29
+
30
+ for y in range(t_y - 1, -1, -1):
31
+ path[y, index] = 1
32
+ if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
33
+ index = index - 1
34
+
35
+
36
+ @cython.boundscheck(False)
37
+ @cython.wraparound(False)
38
+ cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
39
+ cdef int b = paths.shape[0]
40
+ cdef int i
41
+ for i in prange(b, nogil=True):
42
+ maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
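This .pyx file must be compiled before pflow.utils.monotonic_align can import maximum_path_c. A minimal build sketch, assuming Cython and setuptools are available; the file name and invocation are illustrative, not part of this commit, and an OpenMP flag such as -fopenmp is needed at compile time for prange to actually run in parallel (without it the loop falls back to serial execution):

# setup_monotonic_align.py  (hypothetical helper script)
from setuptools import setup
from Cython.Build import cythonize

setup(
    name="monotonic_align",
    ext_modules=cythonize("pflow/utils/monotonic_align/core.pyx"),
)
# build in place with:  python setup_monotonic_align.py build_ext --inplace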
pflow/utils/pylogger.py ADDED
@@ -0,0 +1,21 @@
1
+ import logging
2
+
3
+ from lightning.pytorch.utilities import rank_zero_only
4
+
5
+
6
+ def get_pylogger(name: str = __name__) -> logging.Logger:
7
+ """Initializes a multi-GPU-friendly python command line logger.
8
+
9
+ :param name: The name of the logger, defaults to ``__name__``.
10
+
11
+ :return: A logger object.
12
+ """
13
+ logger = logging.getLogger(name)
14
+
15
+ # this ensures all logging levels get marked with the rank zero decorator
16
+ # otherwise logs would get multiplied for each GPU process in multi-GPU setup
17
+ logging_levels = ("debug", "info", "warning", "error", "exception", "fatal", "critical")
18
+ for level in logging_levels:
19
+ setattr(logger, level, rank_zero_only(getattr(logger, level)))
20
+
21
+ return logger
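A minimal usage sketch: the returned logger behaves like a standard logging.Logger, but in multi-GPU runs each message is emitted only on rank zero:

from pflow.utils.pylogger import get_pylogger

log = get_pylogger(__name__)
log.info("dataset ready")   # emitted once per run (given a configured handler), not once per DDP process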