Text-to-Speech
speechbrain
English
TTS
speech-synthesis
Tacotron2
Krisshvamsi committed on
Commit bd0655f
1 Parent(s): caa1859

Upload 2 files

Files changed (2)
  1. model.ckpt +3 -0
  2. models.py +240 -0
model.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45e8e10afd13fa8bf1563f8babdc4779d3316ec227eaabf8c57dab9e4f794ded
+ size 226346982
models.py ADDED
@@ -0,0 +1,240 @@
+
+ import math
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import torch.nn.functional as F
+
+
+ def dynamic_batch_collate(batch):
+     """
+     Collates batches dynamically based on the lengths of the sequences in each batch.
+     Sorting by length keeps sequences of similar length together, which minimizes
+     padding and improves computational efficiency.
+
+     Args:
+         batch: A list of dictionaries, each containing 'id', 'phoneme_seq_encoded',
+             'mel_spec', 'mel_lengths', and 'stop_token_targets'.
+
+     Returns:
+         A batch of sequences, each padded to the length of the longest sequence in the batch.
+     """
+     # Sort the batch by 'mel_lengths' in descending order for efficient packing
+     batch.sort(key=lambda x: x['mel_lengths'], reverse=True)
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+     # Extract sequences and their lengths
+     ids = [item['id'] for item in batch]
+     phoneme_seqs = [item['phoneme_seq_encoded'] for item in batch]
+     mel_specs = [item['mel_spec'] for item in batch]
+     mel_lengths = torch.tensor([item['mel_lengths'] for item in batch], device=device)
+     stop_token_targets = [item['stop_token_targets'] for item in batch]
+
+     # Pad phoneme sequences to the longest sequence in the batch
+     phoneme_seq_padded = torch.nn.utils.rnn.pad_sequence(phoneme_seqs, batch_first=True, padding_value=0).to(device)
+
+     # Find the maximum mel length for padding
+     max_len = mel_lengths.max().item()
+     num_mel_bins = 80  # fixed number of mel bins used throughout this file
+
+     # Pad mel spectrograms to (batch, n_mels, max_len)
+     mel_specs_padded = torch.zeros((len(mel_specs), num_mel_bins, max_len), device=device)
+     for i, mel in enumerate(mel_specs):
+         mel_len = mel.shape[1]
+         mel_specs_padded[i, :, :mel_len] = mel.to(device)
+
+     # Pad stop token targets
+     stop_token_targets_padded = torch.zeros((len(stop_token_targets), max_len), device=device)
+     for i, stop in enumerate(stop_token_targets):
+         stop_len = stop.size(0)
+         stop_token_targets_padded[i, :stop_len] = stop.to(device)
+
+     return ids, phoneme_seq_padded, mel_specs_padded, mel_lengths, stop_token_targets_padded
+
+
+ class EncoderPrenet(torch.nn.Module):
+     """
+     Encoder prenet for the Transformer-based TTS system.
+
+     This module applies a stack of convolutional layers, each followed by batch
+     normalization, ReLU activation, and dropout, and then a final linear projection
+     to the desired dimension.
+
+     Parameters:
+         input_dim (int): Dimension of the input features. Defaults to 512.
+         hidden_dim (int): Dimension of the hidden layers. Defaults to 512.
+         num_layers (int): Number of convolutional layers. Defaults to 3.
+         dropout (float): Dropout probability. Defaults to 0.2.
+
+     Inputs:
+         x (torch.Tensor): Input tensor of shape (batch_size, seq_len, input_dim).
+
+     Returns:
+         torch.Tensor: Output tensor of shape (batch_size, seq_len, hidden_dim).
+     """
+     def __init__(self, input_dim=512, hidden_dim=512, num_layers=3, dropout=0.2):
+         super().__init__()
+
+         # Convolutional layers; the first layer maps input_dim to hidden_dim,
+         # the remaining layers keep the hidden dimension.
+         conv_layers = []
+         for i in range(num_layers):
+             in_channels = input_dim if i == 0 else hidden_dim
+             conv_layers.append(nn.Conv1d(in_channels, hidden_dim, kernel_size=3, padding=1))
+             conv_layers.append(nn.BatchNorm1d(hidden_dim))
+             conv_layers.append(nn.ReLU())
+             conv_layers.append(nn.Dropout(dropout))
+         self.conv_layers = nn.Sequential(*conv_layers)
+
+         # Final linear projection
+         self.projection = nn.Linear(hidden_dim, hidden_dim)
+
+     def forward(self, x):
+         x = x.transpose(1, 2)  # (batch, seq_len, channels) -> (batch, channels, seq_len) for Conv1d
+         x = self.conv_layers(x)
+         x = x.transpose(1, 2)  # Transpose back to (batch, seq_len, channels)
+         x = self.projection(x)
+         return x
+
+
+ class DecoderPrenet(torch.nn.Module):
+     """
+     Decoder prenet for the Transformer-based TTS system.
+
+     This module applies two fully connected layers with ReLU activations,
+     followed by a linear projection to the desired output dimension.
+
+     Parameters:
+         input_dim (int): Dimension of the input features (mel channels). Defaults to 80.
+         hidden_dim (int): Dimension of the hidden layers. Defaults to 256.
+         output_dim (int): Dimension of the output features. Defaults to 512.
+
+     Inputs:
+         x (torch.Tensor): Input tensor of shape (batch_size, input_dim, seq_len),
+             as produced by the collate function.
+
+     Returns:
+         torch.Tensor: Output tensor of shape (batch_size, seq_len, output_dim).
+     """
+     def __init__(self, input_dim=80, hidden_dim=256, output_dim=512):
+         super().__init__()
+
+         self.fc1 = nn.Linear(input_dim, hidden_dim)
+         self.fc2 = nn.Linear(hidden_dim, hidden_dim)
+         self.projection = nn.Linear(hidden_dim, output_dim)
+
+     def forward(self, x):
+         x = x.transpose(1, 2)  # (batch, n_mels, seq_len) -> (batch, seq_len, n_mels)
+         x = F.relu(self.fc1(x))
+         x = F.relu(self.fc2(x))
+         x = self.projection(x)
+         return x
+
+
+ class ScaledPositionalEncoding(nn.Module):
+     """
+     Adds scaled sinusoidal positional encoding to input sequences.
+
+     Parameters:
+         d_model (int): Dimensionality of the model. It must match the embedding dimension of the input.
+         max_len (int): Maximum length of the input sequence. Defaults to 5000.
+
+     Inputs:
+         x (torch.Tensor): Input tensor of shape (batch_size, seq_len, embedding_dim).
+
+     Returns:
+         torch.Tensor: Output tensor with scaled positional encoding added, shape (batch_size, seq_len, embedding_dim).
+     """
+     def __init__(self, d_model, max_len=5000):
+         super().__init__()
+         self.d_model = d_model
+
+         position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
+
+         # Precompute the encoding with a leading batch dimension so it
+         # broadcasts over batch-first inputs: (1, max_len, d_model)
+         pe = torch.zeros(1, max_len, d_model)
+         pe[0, :, 0::2] = torch.sin(position * div_term)
+         pe[0, :, 1::2] = torch.cos(position * div_term)
+
+         self.register_buffer('pe', pe)
+         self.scale = nn.Parameter(torch.ones(1))
+
+     def forward(self, x):
+         """
+         Adds scaled positional encoding to the input tensor x.
+
+         Args:
+             x: Tensor of shape (batch_size, seq_len, embedding_dim)
+         """
+         # Slice by sequence length (dim 1 of a batch-first input), not batch size
+         scaled_pe = self.pe[:, :x.size(1), :] * self.scale
+         x = x + scaled_pe
+         return x
+
+
+ class PostNet(torch.nn.Module):
+     """
+     Post-processing network for mel-spectrogram refinement.
+
+     This module consists of multiple convolutional layers with batch normalization
+     and ReLU activation. It refines the mel-spectrogram output from the decoder;
+     the decoder output and the postnet output are typically summed to form the
+     final prediction.
+
+     Parameters:
+         mel_channels (int): Number of mel channels in the input mel-spectrogram.
+         postnet_channels (int): Number of channels in the postnet layers.
+         kernel_size (int): Size of the convolutional kernel.
+         postnet_layers (int): Number of postnet layers.
+
+     Inputs:
+         x (torch.Tensor): Input tensor of shape (batch_size, seq_len, mel_channels).
+
+     Returns:
+         torch.Tensor: Refined mel-spectrogram of shape (batch_size, seq_len, mel_channels).
+     """
+     def __init__(self, mel_channels, postnet_channels, kernel_size, postnet_layers):
+         super().__init__()
+         self.conv_layers = nn.ModuleList()
+
+         # First layer: mel_channels -> postnet_channels
+         self.conv_layers.append(
+             nn.Sequential(
+                 nn.Conv1d(mel_channels, postnet_channels, kernel_size, padding=kernel_size // 2),
+                 nn.BatchNorm1d(postnet_channels),
+                 nn.ReLU()
+             )
+         )
+
+         # Middle layers: postnet_channels -> postnet_channels
+         for _ in range(1, postnet_layers - 1):
+             self.conv_layers.append(
+                 nn.Sequential(
+                     nn.Conv1d(postnet_channels, postnet_channels, kernel_size, padding=kernel_size // 2),
+                     nn.BatchNorm1d(postnet_channels),
+                     nn.ReLU()
+                 )
+             )
+
+         # Final layer: postnet_channels -> mel_channels, no activation
+         self.conv_layers.append(
+             nn.Sequential(
+                 nn.Conv1d(postnet_channels, mel_channels, kernel_size, padding=kernel_size // 2),
+                 nn.BatchNorm1d(mel_channels)
+             )
+         )
+
+     def forward(self, x):
+         x = x.transpose(1, 2)  # (batch, seq_len, mel_channels) -> (batch, mel_channels, seq_len)
+         for conv in self.conv_layers:
+             x = conv(x)
+         x = x.transpose(1, 2)  # Transpose back
+         return x
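
A minimal sketch of how dynamic_batch_collate could be used with a PyTorch DataLoader. The ToyTTSDataset below is hypothetical (not part of this commit); it only assumes each item is a dict with the keys the collate function reads ('id', 'phoneme_seq_encoded', 'mel_spec', 'mel_lengths', 'stop_token_targets'):

import torch
from torch.utils.data import Dataset, DataLoader
from models import dynamic_batch_collate  # this file

class ToyTTSDataset(Dataset):
    """Hypothetical dataset emitting the fields dynamic_batch_collate expects."""
    def __init__(self, n_items=8, n_mels=80):
        self.items = []
        for i in range(n_items):
            t = int(torch.randint(20, 60, (1,)))  # random number of mel frames
            stop = torch.zeros(t)
            stop[-1] = 1.0                        # stop token fires on the last frame
            self.items.append({
                'id': f'utt_{i}',
                'phoneme_seq_encoded': torch.randint(1, 40, (t // 4,)),  # fake phoneme ids
                'mel_spec': torch.randn(n_mels, t),  # (n_mels, frames), channels first
                'mel_lengths': t,
                'stop_token_targets': stop,
            })

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

loader = DataLoader(ToyTTSDataset(), batch_size=4, collate_fn=dynamic_batch_collate)
ids, phonemes, mels, mel_lengths, stops = next(iter(loader))
print(phonemes.shape, mels.shape, stops.shape)  # mels: (4, 80, max_len in batch)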
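
And a minimal sketch of how the four modules might be composed; the embedding layer, the nn.Transformer stand-in for the seq2seq body, and the hyperparameter values are illustrative assumptions, not part of this file:

import torch
import torch.nn as nn
from models import EncoderPrenet, DecoderPrenet, ScaledPositionalEncoding, PostNet  # this file

d_model, n_mels = 512, 80

# Hypothetical wiring around the pre/post networks defined above.
phoneme_embedding = nn.Embedding(num_embeddings=100, embedding_dim=d_model)
encoder_prenet = EncoderPrenet(input_dim=d_model, hidden_dim=d_model)
decoder_prenet = DecoderPrenet(input_dim=n_mels, hidden_dim=256, output_dim=d_model)
pos_encoding = ScaledPositionalEncoding(d_model)
transformer = nn.Transformer(d_model=d_model, batch_first=True)  # stand-in seq2seq body
mel_proj = nn.Linear(d_model, n_mels)
postnet = PostNet(mel_channels=n_mels, postnet_channels=512, kernel_size=5, postnet_layers=5)

phonemes = torch.randint(1, 100, (2, 30))  # (batch, text_len)
mels = torch.randn(2, n_mels, 100)         # (batch, n_mels, frames), as from the collate fn

src = pos_encoding(encoder_prenet(phoneme_embedding(phonemes)))
tgt = pos_encoding(decoder_prenet(mels))
dec = transformer(src, tgt)                # (batch, frames, d_model)
mel_before = mel_proj(dec)                 # (batch, frames, n_mels)
mel_after = mel_before + postnet(mel_before)  # residual refinement
print(mel_after.shape)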