| """Core model definitions for Chiluka TTS.""" |
|
|
| import os |
| import math |
| import yaml |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from torch.nn.utils import weight_norm, spectral_norm |
| from collections import OrderedDict |
| from munch import Munch |
|
|
| from transformers import AlbertConfig, AlbertModel |
|
|
| from .diffusion.sampler import KDiffusion, LogNormalDistribution |
| from .diffusion.modules import Transformer1d, StyleTransformer1d |
| from .diffusion.diffusion import AudioDiffusionConditional |
| from .hifigan import Decoder |
|
|
|
|
| |
|
|


# ===== Style encoder =====

class DownSample(nn.Module):
    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        if self.layer_type == 'none':
            return x
        elif self.layer_type == 'timepreserve':
            return F.avg_pool2d(x, (2, 1))
        elif self.layer_type == 'half':
            # replicate the last frame so odd-length inputs survive 2x2 pooling
            if x.shape[-1] % 2 != 0:
                x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1)
            return F.avg_pool2d(x, 2)
        else:
            raise RuntimeError(f'Unexpected downsample type {self.layer_type}')


class LearnedDownSample(nn.Module):
    def __init__(self, layer_type, dim_in):
        super().__init__()
        self.layer_type = layer_type
        if self.layer_type == 'none':
            self.conv = nn.Identity()
        elif self.layer_type == 'timepreserve':
            self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, padding=(1, 0)))
        elif self.layer_type == 'half':
            self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, padding=1))
        else:
            raise RuntimeError(f'Unexpected downsample type {self.layer_type}')

    def forward(self, x):
        return self.conv(x)


class ResBlk(nn.Module):
    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=False, downsample='none'):
        super().__init__()
        self.actv = actv
        self.normalize = normalize
        self.downsample = DownSample(downsample)
        self.downsample_res = LearnedDownSample(downsample, dim_in)
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        self.conv1 = spectral_norm(nn.Conv2d(dim_in, dim_in, 3, 1, 1))
        self.conv2 = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1))
        if self.normalize:
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = spectral_norm(nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False))

    def _shortcut(self, x):
        if self.learned_sc:
            x = self.conv1x1(x)
        # self.downsample is a Module, hence always truthy; DownSample('none') is the identity
        if self.downsample:
            x = self.downsample(x)
        return x

    def _residual(self, x):
        if self.normalize:
            x = self.norm1(x)
        x = self.actv(x)
        x = self.conv1(x)
        x = self.downsample_res(x)
        if self.normalize:
            x = self.norm2(x)
        x = self.actv(x)
        x = self.conv2(x)
        return x

    def forward(self, x):
        x = self._shortcut(x) + self._residual(x)
        return x / math.sqrt(2)  # keep the sum of the two branches at unit variance
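

# A usage sketch for ResBlk (illustrative only; the sizes are made up):
#
#   blk = ResBlk(dim_in=64, dim_out=128, downsample='half')
#   x = torch.randn(4, 64, 80, 100)   # (batch, channels, n_mels, frames)
#   y = blk(x)                        # -> (4, 128, 40, 50): both spatial axes halved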


class StyleEncoder(nn.Module):
    """Encodes a mel spectrogram (B, 1, n_mels, T) into a style vector (B, style_dim)."""

    def __init__(self, dim_in=48, style_dim=48, max_conv_dim=384):
        super().__init__()
        blocks = []
        blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))]
        repeat_num = 4
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample='half')]
            dim_in = dim_out
        blocks += [nn.LeakyReLU(0.2)]
        blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))]
        blocks += [nn.AdaptiveAvgPool2d(1)]
        blocks += [nn.LeakyReLU(0.2)]
        self.shared = nn.Sequential(*blocks)
        self.unshared = nn.Linear(dim_out, style_dim)

    def forward(self, x):
        h = self.shared(x)
        h = h.view(h.size(0), -1)
        s = self.unshared(h)
        return s
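

# A usage sketch for StyleEncoder (illustrative only):
#
#   enc = StyleEncoder(dim_in=48, style_dim=128, max_conv_dim=384)
#   mel = torch.randn(2, 1, 80, 120)  # (batch, 1, n_mels, frames)
#   s = enc(mel)                      # -> (2, 128)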


# ===== Text encoder =====

class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class LinearNorm(nn.Module):
    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super().__init__()
        self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
        nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.calculate_gain(w_init_gain))

    def forward(self, x):
        return self.linear_layer(x)


class TextEncoder(nn.Module):
    def __init__(self, channels, kernel_size, depth, n_symbols, actv=nn.LeakyReLU(0.2)):
        super().__init__()
        self.embedding = nn.Embedding(n_symbols, channels)
        padding = (kernel_size - 1) // 2
        self.cnn = nn.ModuleList()
        for _ in range(depth):
            self.cnn.append(nn.Sequential(
                weight_norm(nn.Conv1d(channels, channels, kernel_size=kernel_size, padding=padding)),
                LayerNorm(channels),
                actv,
                nn.Dropout(0.2),
            ))
        self.lstm = nn.LSTM(channels, channels // 2, 1, batch_first=True, bidirectional=True)

    def forward(self, x, input_lengths, m):
        x = self.embedding(x)  # (B, T) -> (B, T, channels)
        x = x.transpose(1, 2)  # (B, channels, T)
        m = m.to(input_lengths.device).unsqueeze(1)
        x.masked_fill_(m, 0.0)
        for c in self.cnn:
            x = c(x)
            x.masked_fill_(m, 0.0)
        x = x.transpose(1, 2)
        input_lengths = input_lengths.cpu().numpy()
        x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True, enforce_sorted=False)
        self.lstm.flatten_parameters()
        x, _ = self.lstm(x)
        x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
        x = x.transpose(-1, -2)
        # pad_packed_sequence trims to the longest sequence in the batch;
        # pad back to the full mask length so downstream shapes stay fixed
        x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
        x_pad[:, :, :x.shape[-1]] = x
        x = x_pad.to(x.device)
        x.masked_fill_(m, 0.0)
        return x
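

# A usage sketch for TextEncoder (illustrative only; the vocabulary size is made up):
#
#   enc = TextEncoder(channels=512, kernel_size=5, depth=3, n_symbols=178)
#   tokens = torch.randint(0, 178, (2, 50))
#   lengths = torch.tensor([50, 42])
#   mask = torch.arange(50).unsqueeze(0) >= lengths.unsqueeze(1)  # True where padded
#   h = enc(tokens, lengths, mask)    # -> (2, 512, 50)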


# ===== Prosody predictor =====

class AdaIN1d(nn.Module):
    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm1d(num_features, affine=False)
        self.fc = nn.Linear(style_dim, num_features * 2)

    def forward(self, x, s):
        h = self.fc(s)
        h = h.view(h.size(0), h.size(1), 1)
        gamma, beta = torch.chunk(h, chunks=2, dim=1)
        return (1 + gamma) * self.norm(x) + beta
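

# A usage sketch for AdaIN1d (illustrative only): the style vector predicts a
# per-channel scale (1 + gamma) and shift (beta) for the instance-normalized input.
#
#   adain = AdaIN1d(style_dim=128, num_features=64)
#   x = torch.randn(2, 64, 100)   # (batch, channels, frames)
#   s = torch.randn(2, 128)
#   y = adain(x, s)               # same shape as x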


class UpSample1d(nn.Module):
    def __init__(self, layer_type):
        super().__init__()
        self.layer_type = layer_type

    def forward(self, x):
        if self.layer_type == 'none':
            return x
        else:
            return F.interpolate(x, scale_factor=2, mode='nearest')


class AdainResBlk1d(nn.Module):
    def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2), upsample='none', dropout_p=0.0):
        super().__init__()
        self.actv = actv
        self.upsample_type = upsample
        self.upsample = UpSample1d(upsample)
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)
        self.dropout = nn.Dropout(dropout_p)
        # any value other than 'none' (including the bool True used by
        # ProsodyPredictor) enables 2x upsampling
        if upsample == 'none':
            self.pool = nn.Identity()
        else:
            self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1))

    def _build_weights(self, dim_in, dim_out, style_dim):
        self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
        self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
        self.norm1 = AdaIN1d(style_dim, dim_in)
        self.norm2 = AdaIN1d(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))

    def _shortcut(self, x):
        x = self.upsample(x)
        if self.learned_sc:
            x = self.conv1x1(x)
        return x

    def _residual(self, x, s):
        x = self.norm1(x, s)
        x = self.actv(x)
        x = self.pool(x)
        x = self.conv1(self.dropout(x))
        x = self.norm2(x, s)
        x = self.actv(x)
        x = self.conv2(self.dropout(x))
        return x

    def forward(self, x, s):
        out = self._residual(x, s)
        out = (out + self._shortcut(x)) / math.sqrt(2)
        return out
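

# A usage sketch for AdainResBlk1d with upsampling (illustrative only):
#
#   blk = AdainResBlk1d(dim_in=512, dim_out=256, style_dim=128, upsample=True)
#   x = torch.randn(2, 512, 100)
#   s = torch.randn(2, 128)
#   y = blk(x, s)                 # -> (2, 256, 200): time axis doubled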


class AdaLayerNorm(nn.Module):
    def __init__(self, style_dim, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.fc = nn.Linear(style_dim, channels * 2)

    def forward(self, x, s):
        x = x.transpose(-1, -2)
        x = x.transpose(1, -1)
        h = self.fc(s)
        h = h.view(h.size(0), h.size(1), 1)
        gamma, beta = torch.chunk(h, chunks=2, dim=1)
        gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), eps=self.eps)
        x = (1 + gamma) * x + beta
        return x.transpose(1, -1).transpose(-1, -2)


class DurationEncoder(nn.Module):
    def __init__(self, sty_dim, d_model, nlayers, dropout=0.1):
        super().__init__()
        self.lstms = nn.ModuleList()
        for _ in range(nlayers):
            self.lstms.append(nn.LSTM(d_model + sty_dim, d_model // 2, num_layers=1, batch_first=True, bidirectional=True, dropout=dropout))
            self.lstms.append(AdaLayerNorm(sty_dim, d_model))
        self.dropout = dropout
        self.d_model = d_model
        self.sty_dim = sty_dim

    def forward(self, x, style, text_lengths, m):
        masks = m.to(text_lengths.device)
        x = x.permute(2, 0, 1)  # (B, C, T) -> (T, B, C)
        s = style.expand(x.shape[0], x.shape[1], -1)  # broadcast the style over time
        x = torch.cat([x, s], axis=-1)
        x.masked_fill_(masks.unsqueeze(-1).transpose(0, 1), 0.0)
        x = x.transpose(0, 1)
        input_lengths = text_lengths.cpu().numpy()
        x = x.transpose(-1, -2)
        for block in self.lstms:
            if isinstance(block, AdaLayerNorm):
                x = block(x.transpose(-1, -2), style).transpose(-1, -2)
                x = torch.cat([x, s.permute(1, -1, 0)], axis=1)  # re-append the style channels
                x.masked_fill_(masks.unsqueeze(-1).transpose(-1, -2), 0.0)
            else:
                x = x.transpose(-1, -2)
                x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True, enforce_sorted=False)
                block.flatten_parameters()
                x, _ = block(x)
                x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
                x = F.dropout(x, p=self.dropout, training=self.training)
                x = x.transpose(-1, -2)
                # pad back to the full mask length (see TextEncoder.forward)
                x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
                x_pad[:, :, :x.shape[-1]] = x
                x = x_pad.to(x.device)
        return x.transpose(-1, -2)
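

# A usage sketch for DurationEncoder (illustrative only). The output keeps the
# style channels, so its feature size is d_model + sty_dim:
#
#   enc = DurationEncoder(sty_dim=128, d_model=512, nlayers=3)
#   x = torch.randn(2, 512, 50)   # text features (batch, d_model, T)
#   s = torch.randn(2, 128)
#   lengths = torch.tensor([50, 40])
#   mask = torch.arange(50).unsqueeze(0) >= lengths.unsqueeze(1)
#   d = enc(x, s, lengths, mask)  # -> (2, 50, 640)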


class ProsodyPredictor(nn.Module):
    def __init__(self, style_dim, d_hid, nlayers, max_dur=50, dropout=0.1):
        super().__init__()
        self.text_encoder = DurationEncoder(sty_dim=style_dim, d_model=d_hid, nlayers=nlayers, dropout=dropout)
        self.lstm = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
        self.duration_proj = LinearNorm(d_hid, max_dur)
        self.shared = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
        self.F0 = nn.ModuleList()
        self.F0.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
        self.F0.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
        self.F0.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
        self.N = nn.ModuleList()
        self.N.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
        self.N.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
        self.N.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
        self.F0_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
        self.N_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)

    def forward(self, texts, style, text_lengths, alignment, m):
        d = self.text_encoder(texts, style, text_lengths, m)
        input_lengths = text_lengths.cpu().numpy()
        x = nn.utils.rnn.pack_padded_sequence(d, input_lengths, batch_first=True, enforce_sorted=False)
        m = m.to(text_lengths.device).unsqueeze(1)
        self.lstm.flatten_parameters()
        x, _ = self.lstm(x)
        x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
        x_pad = torch.zeros([x.shape[0], m.shape[-1], x.shape[-1]])
        x_pad[:, :x.shape[1], :] = x
        x = x_pad.to(x.device)
        duration = self.duration_proj(F.dropout(x, 0.5, training=self.training))
        # expand the text-rate features to frame rate through the alignment matrix
        en = (d.transpose(-1, -2) @ alignment)
        return duration.squeeze(-1), en

    def F0Ntrain(self, x, s):
        x, _ = self.shared(x.transpose(-1, -2))
        F0 = x.transpose(-1, -2)
        for block in self.F0:
            F0 = block(F0, s)
        F0 = self.F0_proj(F0)
        N = x.transpose(-1, -2)
        for block in self.N:
            N = block(N, s)
        N = self.N_proj(N)
        return F0.squeeze(1), N.squeeze(1)
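

# A usage sketch for ProsodyPredictor.F0Ntrain (illustrative only). The middle
# F0/N blocks upsample, so pitch and energy come out at twice the input rate:
#
#   pred = ProsodyPredictor(style_dim=128, d_hid=512, nlayers=3)
#   en = torch.randn(2, 640, 120)  # aligned features (batch, d_hid + style_dim, frames)
#   s = torch.randn(2, 128)
#   F0, N = pred.F0Ntrain(en, s)   # each (2, 240)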


# ===== PL-BERT =====

class CustomAlbert(AlbertModel):
    """AlbertModel that returns only the last hidden state."""

    def forward(self, *args, **kwargs):
        outputs = super().forward(*args, **kwargs)
        return outputs.last_hidden_state


def load_plbert(log_dir):
    """Load a PL-BERT model from a training-log directory."""
    config_path = os.path.join(log_dir, "config.yml")
    with open(config_path) as f:
        plbert_config = yaml.safe_load(f)
    albert_base_configuration = AlbertConfig(**plbert_config['model_params'])
    bert = CustomAlbert(albert_base_configuration)
    files = os.listdir(log_dir)
    ckpts = [f for f in files if f.startswith("step_")]
    iters = [int(f.split('_')[-1].split('.')[0]) for f in ckpts if os.path.isfile(os.path.join(log_dir, f))]
    latest_iter = sorted(iters)[-1]  # use the most recent checkpoint
    try:
        checkpoint = torch.load(os.path.join(log_dir, f"step_{latest_iter}.t7"), map_location='cpu', weights_only=False)
    except TypeError:  # older torch without the weights_only argument
        checkpoint = torch.load(os.path.join(log_dir, f"step_{latest_iter}.t7"), map_location='cpu')
    state_dict = checkpoint['net']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # strip the 'module.' prefix left by DataParallel
        if name.startswith('encoder.'):
            name = name[8:]
        new_state_dict[name] = v
    if "embeddings.position_ids" in new_state_dict:
        del new_state_dict["embeddings.position_ids"]
    bert.load_state_dict(new_state_dict, strict=False)
    return bert
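

# load_plbert expects a directory shaped like this hypothetical layout:
#
#   plbert_dir/
#       config.yml         # its 'model_params' section feeds AlbertConfig
#       step_1000000.t7    # checkpoint whose 'net' entry holds the state dict
#
#   bert = load_plbert("plbert_dir")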


# ===== Text aligner (ASR) =====

class MFCC(nn.Module):
    """Converts mel spectrograms to MFCCs with a fixed DCT matrix."""

    def __init__(self, n_mfcc=40, n_mels=80):
        super().__init__()
        self.n_mfcc = n_mfcc
        self.n_mels = n_mels
        self.norm = 'ortho'
        dct_mat = audio_F.create_dct(self.n_mfcc, self.n_mels, self.norm)
        self.register_buffer('dct_mat', dct_mat)

    def forward(self, mel_specgram):
        if mel_specgram.dim() == 2:
            mel_specgram = mel_specgram.unsqueeze(0)
            unsqueezed = True
        else:
            unsqueezed = False
        mfcc = torch.matmul(mel_specgram.transpose(1, 2), self.dct_mat).transpose(1, 2)
        if unsqueezed:
            mfcc = mfcc.squeeze(0)
        return mfcc
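

# A usage sketch for MFCC (illustrative only):
#
#   mfcc = MFCC(n_mfcc=40, n_mels=80)
#   mel = torch.randn(80, 120)   # (n_mels, frames); batched input is also accepted
#   out = mfcc(mel)              # -> (40, 120)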


class ConvNorm(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'):
        super().__init__()
        if padding is None:
            padding = int(dilation * (kernel_size - 1) / 2)
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.calculate_gain(w_init_gain))

    def forward(self, signal):
        return self.conv(signal)


class ConvBlock(nn.Module):
    def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='relu'):
        super().__init__()
        self._n_groups = 8
        self.blocks = nn.ModuleList([self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p) for i in range(n_conv)])

    def forward(self, x):
        for block in self.blocks:
            res = x
            x = block(x)
            x += res
        return x

    def _get_conv(self, hidden_dim, dilation, activ='relu', dropout_p=0.2):
        layers = [
            ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
            nn.ReLU() if activ == 'relu' else nn.LeakyReLU(0.2),
            nn.GroupNorm(num_groups=self._n_groups, num_channels=hidden_dim),
            nn.Dropout(p=dropout_p),
            ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
            nn.ReLU() if activ == 'relu' else nn.LeakyReLU(0.2),
            nn.Dropout(p=dropout_p)
        ]
        return nn.Sequential(*layers)


class LocationLayer(nn.Module):
    def __init__(self, attention_n_filters, attention_kernel_size, attention_dim):
        super().__init__()
        padding = int((attention_kernel_size - 1) / 2)
        self.location_conv = ConvNorm(2, attention_n_filters, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1)
        self.location_dense = LinearNorm(attention_n_filters, attention_dim, bias=False, w_init_gain='tanh')

    def forward(self, attention_weights_cat):
        processed_attention = self.location_conv(attention_weights_cat)
        processed_attention = processed_attention.transpose(1, 2)
        processed_attention = self.location_dense(processed_attention)
        return processed_attention


class Attention(nn.Module):
    def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, attention_location_n_filters, attention_location_kernel_size):
        super().__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters, attention_location_kernel_size, attention_dim)
        self.score_mask_value = -float("inf")

    def forward(self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask):
        processed_query = self.query_layer(attention_hidden_state.unsqueeze(1))
        processed_attention = self.location_layer(attention_weights_cat)
        energies = self.v(torch.tanh(processed_query + processed_attention + processed_memory))
        energies = energies.squeeze(-1)
        if mask is not None:
            energies.data.masked_fill_(mask, self.score_mask_value)
        attention_weights = F.softmax(energies, dim=1)
        attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
        attention_context = attention_context.squeeze(1)
        return attention_context, attention_weights


class ASRS2S(nn.Module):
    def __init__(self, embedding_dim=256, hidden_dim=512, n_location_filters=32, location_kernel_size=63, n_token=40):
        super().__init__()
        self.embedding = nn.Embedding(n_token, embedding_dim)
        val_range = math.sqrt(6 / hidden_dim)
        self.embedding.weight.data.uniform_(-val_range, val_range)
        self.decoder_rnn_dim = hidden_dim
        self.project_to_n_symbols = nn.Linear(self.decoder_rnn_dim, n_token)
        self.attention_layer = Attention(self.decoder_rnn_dim, hidden_dim, hidden_dim, n_location_filters, location_kernel_size)
        self.decoder_rnn = nn.LSTMCell(self.decoder_rnn_dim + embedding_dim, self.decoder_rnn_dim)
        self.project_to_hidden = nn.Sequential(LinearNorm(self.decoder_rnn_dim * 2, hidden_dim), nn.Tanh())
        self.sos = 1
        self.eos = 2
        self.unk_index = 3
        self.random_mask = 0.1

    def initialize_decoder_states(self, memory, mask):
        B, L, H = memory.shape
        self.decoder_hidden = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
        self.decoder_cell = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
        self.attention_weights = torch.zeros((B, L)).type_as(memory)
        self.attention_weights_cum = torch.zeros((B, L)).type_as(memory)
        self.attention_context = torch.zeros((B, H)).type_as(memory)
        self.memory = memory
        self.processed_memory = self.attention_layer.memory_layer(memory)
        self.mask = mask

    def forward(self, memory, memory_mask, text_input):
        self.initialize_decoder_states(memory, memory_mask)
        # randomly replace a fraction of the input tokens with <unk> as input noise
        random_mask = (torch.rand(text_input.shape) < self.random_mask).to(text_input.device)
        _text_input = text_input.clone()
        _text_input.masked_fill_(random_mask, self.unk_index)
        decoder_inputs = self.embedding(_text_input).transpose(0, 1)  # (T, B, embedding_dim)
        start_embedding = self.embedding(torch.LongTensor([self.sos] * decoder_inputs.size(1)).to(decoder_inputs.device))
        decoder_inputs = torch.cat((start_embedding.unsqueeze(0), decoder_inputs), dim=0)
        hidden_outputs, logit_outputs, alignments = [], [], []
        while len(hidden_outputs) < decoder_inputs.size(0):
            decoder_input = decoder_inputs[len(hidden_outputs)]
            hidden, logit, attention_weights = self.decode(decoder_input)
            hidden_outputs += [hidden]
            logit_outputs += [logit]
            alignments += [attention_weights]
        hidden_outputs = torch.stack(hidden_outputs).transpose(0, 1).contiguous()
        logit_outputs = torch.stack(logit_outputs).transpose(0, 1).contiguous()
        alignments = torch.stack(alignments).transpose(0, 1)
        return hidden_outputs, logit_outputs, alignments

    def decode(self, decoder_input):
        cell_input = torch.cat((decoder_input, self.attention_context), -1)
        self.decoder_hidden, self.decoder_cell = self.decoder_rnn(cell_input, (self.decoder_hidden, self.decoder_cell))
        attention_weights_cat = torch.cat((self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1)
        self.attention_context, self.attention_weights = self.attention_layer(self.decoder_hidden, self.memory, self.processed_memory, attention_weights_cat, self.mask)
        self.attention_weights_cum += self.attention_weights
        hidden_and_context = torch.cat((self.decoder_hidden, self.attention_context), -1)
        hidden = self.project_to_hidden(hidden_and_context)
        logit = self.project_to_n_symbols(F.dropout(hidden, 0.5, self.training))
        return hidden, logit, self.attention_weights


class ASRCNN(nn.Module):
    def __init__(self, input_dim=80, hidden_dim=256, n_token=35, n_layers=6, token_embedding_dim=256):
        super().__init__()
        self.n_token = n_token
        self.n_down = 1
        self.to_mfcc = MFCC()
        self.init_cnn = ConvNorm(input_dim // 2, hidden_dim, kernel_size=7, padding=3, stride=2)  # halves the time axis
        self.cnns = nn.Sequential(*[nn.Sequential(ConvBlock(hidden_dim), nn.GroupNorm(num_groups=1, num_channels=hidden_dim)) for _ in range(n_layers)])
        self.projection = ConvNorm(hidden_dim, hidden_dim // 2)
        self.ctc_linear = nn.Sequential(LinearNorm(hidden_dim // 2, hidden_dim), nn.ReLU(), LinearNorm(hidden_dim, n_token))
        self.asr_s2s = ASRS2S(embedding_dim=token_embedding_dim, hidden_dim=hidden_dim // 2, n_token=n_token)

    def forward(self, x, src_key_padding_mask=None, text_input=None):
        x = self.to_mfcc(x)
        x = self.init_cnn(x)
        x = self.cnns(x)
        x = self.projection(x)
        x = x.transpose(1, 2)
        ctc_logit = self.ctc_linear(x)
        if text_input is not None:
            _, s2s_logit, s2s_attn = self.asr_s2s(x, src_key_padding_mask, text_input)
            return ctc_logit, s2s_logit, s2s_attn
        else:
            return ctc_logit
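

# A usage sketch for ASRCNN in CTC-only mode (illustrative only; the token count
# is made up):
#
#   asr = ASRCNN(input_dim=80, hidden_dim=256, n_token=178)
#   mel = torch.randn(2, 80, 120)  # (batch, n_mels, frames)
#   ctc_logit = asr(mel)           # -> (2, 60, 178)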


def load_ASR_models(ASR_MODEL_PATH, ASR_MODEL_CONFIG):
    """Load the ASR (text aligner) model."""
    with open(ASR_MODEL_CONFIG) as f:
        config = yaml.safe_load(f)
    model_config = config['model_params']
    model = ASRCNN(**model_config)
    try:
        ckpt = torch.load(ASR_MODEL_PATH, map_location="cpu", weights_only=False)
    except TypeError:  # older torch without the weights_only argument
        ckpt = torch.load(ASR_MODEL_PATH, map_location="cpu")
    params = ckpt["model"]
    model.load_state_dict(params)
    return model


# ===== Pitch extractor (JDC) =====

class ResBlock_JDC(nn.Module):
    def __init__(self, in_channels, out_channels, leaky_relu_slope=0.01):
        super().__init__()
        self.downsample = in_channels != out_channels
        self.pre_conv = nn.Sequential(nn.BatchNorm2d(num_features=in_channels), nn.LeakyReLU(leaky_relu_slope, inplace=True), nn.MaxPool2d(kernel_size=(1, 2)))
        self.conv = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(out_channels), nn.LeakyReLU(leaky_relu_slope, inplace=True), nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False))
        self.conv1by1 = None
        if self.downsample:
            self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)

    def forward(self, x):
        x = self.pre_conv(x)
        if self.downsample:
            x = self.conv(x) + self.conv1by1(x)
        else:
            x = self.conv(x) + x
        return x


class JDCNet(nn.Module):
    def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):
        super().__init__()
        self.num_class = num_class
        self.conv_block = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(leaky_relu_slope, inplace=True), nn.Conv2d(64, 64, 3, padding=1, bias=False))
        self.res_block1 = ResBlock_JDC(in_channels=64, out_channels=128)
        self.res_block2 = ResBlock_JDC(in_channels=128, out_channels=192)
        self.res_block3 = ResBlock_JDC(in_channels=192, out_channels=256)
        self.pool_block = nn.Sequential(nn.BatchNorm2d(num_features=256), nn.LeakyReLU(leaky_relu_slope, inplace=True), nn.MaxPool2d(kernel_size=(1, 4)), nn.Dropout(p=0.2))

        # frequency-axis max-pools; with the detector layers below they are unused by
        # forward() but kept so pretrained checkpoints load with strict matching
        self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))
        self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))
        self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))

        # fuses the pooled features of the detector path (64 + 128 + 192 + 256 = 640 channels)
        self.detector_conv = nn.Sequential(nn.Conv2d(640, 256, 1, bias=False), nn.BatchNorm2d(256), nn.LeakyReLU(leaky_relu_slope, inplace=True), nn.Dropout(p=0.2))

        # bidirectional LSTMs over (B, seq_len, 512) features
        self.bilstm_classifier = nn.LSTM(input_size=512, hidden_size=256, batch_first=True, bidirectional=True)
        self.bilstm_detector = nn.LSTM(input_size=512, hidden_size=256, batch_first=True, bidirectional=True)

        # output heads
        self.classifier = nn.Linear(in_features=512, out_features=self.num_class)
        self.detector = nn.Linear(in_features=512, out_features=2)

    def forward(self, x):
        # x: (B, 1, n_mels, T); time becomes the sequence axis after the transpose
        seq_len = x.shape[-1]
        x = x.float().transpose(-1, -2)
        convblock_out = self.conv_block(x)
        resblock1_out = self.res_block1(convblock_out)
        resblock2_out = self.res_block2(resblock1_out)
        resblock3_out = self.res_block3(resblock2_out)
        poolblock_out = self.pool_block[0](resblock3_out)
        poolblock_out = self.pool_block[1](poolblock_out)
        GAN_feature = poolblock_out.transpose(-1, -2)
        poolblock_out = self.pool_block[2](poolblock_out)
        classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))
        classifier_out, _ = self.bilstm_classifier(classifier_out)
        classifier_out = classifier_out.contiguous().view((-1, 512))
        classifier_out = self.classifier(classifier_out)
        classifier_out = classifier_out.view((-1, seq_len, self.num_class))
        return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out


def load_F0_models(path):
    """Load the F0 (pitch) model."""
    F0_model = JDCNet(num_class=1, seq_len=192)
    try:
        params = torch.load(path, map_location='cpu', weights_only=False)['net']
    except TypeError:  # older torch without the weights_only argument
        params = torch.load(path, map_location='cpu')['net']
    F0_model.load_state_dict(params)
    return F0_model
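

# A usage sketch for the pitch extractor (illustrative only):
#
#   f0_model = JDCNet(num_class=1, seq_len=192)
#   mel = torch.randn(2, 1, 80, 192)  # (batch, 1, n_mels, frames)
#   f0, gan_feature, pooled = f0_model(mel)
#   f0.shape                          # -> (2, 192): one pitch value per frame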


# ===== Model assembly =====

def build_model(args, text_aligner, pitch_extractor, bert):
    """Build the full TTS model."""
    # only the HiFi-GAN decoder is imported in this module; an istftnet decoder
    # would also need gen_istft_n_fft/gen_istft_hop_size arguments
    assert args.decoder.type == 'hifigan', f'Unsupported decoder type: {args.decoder.type}'

    decoder = Decoder(
        dim_in=args.hidden_dim,
        style_dim=args.style_dim,
        dim_out=args.n_mels,
        resblock_kernel_sizes=args.decoder.resblock_kernel_sizes,
        upsample_rates=args.decoder.upsample_rates,
        upsample_initial_channel=args.decoder.upsample_initial_channel,
        resblock_dilation_sizes=args.decoder.resblock_dilation_sizes,
        upsample_kernel_sizes=args.decoder.upsample_kernel_sizes
    )

    text_encoder = TextEncoder(channels=args.hidden_dim, kernel_size=5, depth=args.n_layer, n_symbols=args.n_token)
    predictor = ProsodyPredictor(style_dim=args.style_dim, d_hid=args.hidden_dim, nlayers=args.n_layer, max_dur=args.max_dur, dropout=args.dropout)
    style_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim)
    predictor_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim)

    if args.multispeaker:
        transformer = StyleTransformer1d(
            channels=args.style_dim * 2,
            context_embedding_features=bert.config.hidden_size,
            context_features=args.style_dim * 2,
            **args.diffusion.transformer
        )
    else:
        transformer = Transformer1d(
            channels=args.style_dim * 2,
            context_embedding_features=bert.config.hidden_size,
            **args.diffusion.transformer
        )

    diffusion = AudioDiffusionConditional(
        in_channels=1,
        embedding_max_length=bert.config.max_position_embeddings,
        embedding_features=bert.config.hidden_size,
        embedding_mask_proba=args.diffusion.embedding_mask_proba,
        channels=args.style_dim * 2,
        context_features=args.style_dim * 2,
    )

    diffusion.diffusion = KDiffusion(
        net=diffusion.unet,
        sigma_distribution=LogNormalDistribution(mean=args.diffusion.dist.mean, std=args.diffusion.dist.std),
        sigma_data=args.diffusion.dist.sigma_data,
        dynamic_threshold=0.0
    )
    # swap the default UNet for the (style) transformer built above
    diffusion.diffusion.net = transformer
    diffusion.unet = transformer

    nets = Munch(
        bert=bert,
        bert_encoder=nn.Linear(bert.config.hidden_size, args.hidden_dim),
        predictor=predictor,
        decoder=decoder,
        text_encoder=text_encoder,
        predictor_encoder=predictor_encoder,
        style_encoder=style_encoder,
        diffusion=diffusion,
        text_aligner=text_aligner,
        pitch_extractor=pitch_extractor,
    )

    return nets
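

# An assembly sketch (illustrative; the config fields mirror the attributes read
# above and would normally come from a YAML file):
#
#   args = Munch(
#       decoder=Munch(type='hifigan', resblock_kernel_sizes=[3, 7, 11], ...),
#       hidden_dim=512, style_dim=128, n_mels=80, n_layer=3, n_token=178,
#       max_dur=50, dropout=0.2, dim_in=64, multispeaker=False,
#       diffusion=Munch(transformer=Munch(...), dist=Munch(...), embedding_mask_proba=0.1),
#   )
#   nets = build_model(args, text_aligner, pitch_extractor, bert)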