import logging

import torch
from trainer.io import load_fsspec

from TTS.encoder.models.resnet import ResNetSpeakerEncoder
from TTS.vocoder.models.hifigan_generator import HifiganGenerator

logger = logging.getLogger(__name__)


class HifiDecoder(torch.nn.Module):
    def __init__(
        self,
        input_sample_rate=22050,
        output_sample_rate=24000,
        output_hop_length=256,
        ar_mel_length_compression=1024,
        decoder_input_dim=1024,
        resblock_type_decoder="1",
        resblock_dilation_sizes_decoder=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        resblock_kernel_sizes_decoder=[3, 7, 11],
        upsample_rates_decoder=[8, 8, 2, 2],
        upsample_initial_channel_decoder=512,
        upsample_kernel_sizes_decoder=[16, 16, 4, 4],
        d_vector_dim=512,
        cond_d_vector_in_each_upsampling_layer=True,
        speaker_encoder_audio_config={
            "fft_size": 512,
            "win_length": 400,
            "hop_length": 160,
            "sample_rate": 16000,
            "preemphasis": 0.97,
            "num_mels": 64,
        },
    ):
        super().__init__()
        self.input_sample_rate = input_sample_rate
        self.output_sample_rate = output_sample_rate
        self.output_hop_length = output_hop_length
        self.ar_mel_length_compression = ar_mel_length_compression
        self.speaker_encoder_audio_config = speaker_encoder_audio_config
        self.waveform_decoder = HifiganGenerator(
            decoder_input_dim,
            1,
            resblock_type_decoder,
            resblock_dilation_sizes_decoder,
            resblock_kernel_sizes_decoder,
            upsample_kernel_sizes_decoder,
            upsample_initial_channel_decoder,
            upsample_rates_decoder,
            inference_padding=0,
            cond_channels=d_vector_dim,
            conv_pre_weight_norm=False,
            conv_post_weight_norm=False,
            conv_post_bias=False,
            cond_in_each_up_layer=cond_d_vector_in_each_upsampling_layer,
        )
        self.speaker_encoder = ResNetSpeakerEncoder(
            input_dim=64,
            proj_dim=512,
            log_input=True,
            use_torch_spec=True,
            audio_config=speaker_encoder_audio_config,
        )

    @property
    def device(self):
        return next(self.parameters()).device

    def forward(self, latents, g=None):
        """
        Args:
            latents (Tensor): feature input tensor (GPT latent).
            g (Tensor): global conditioning input tensor.

        Returns:
            Tensor: output waveform.

        Shapes:
            latents: [B, T, C]
            Tensor: [B, 1, T]
        """
        # Stretch the GPT latent frames (one per `ar_mel_length_compression`
        # samples) to the vocoder frame rate (one per `output_hop_length` samples).
        z = torch.nn.functional.interpolate(
            latents.transpose(1, 2),
            scale_factor=[self.ar_mel_length_compression / self.output_hop_length],
            mode="linear",
        ).squeeze(1)
        # upsample to the right sr
        if self.output_sample_rate != self.input_sample_rate:
            z = torch.nn.functional.interpolate(
                z,
                scale_factor=[self.output_sample_rate / self.input_sample_rate],
                mode="linear",
            ).squeeze(0)
        o = self.waveform_decoder(z, g=g)
        return o
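
    # Illustrative arithmetic for the interpolation above, using the constructor
    # defaults (an example, not an assertion about other configurations): each
    # GPT latent frame spans ar_mel_length_compression = 1024 samples, while the
    # vocoder consumes one frame per output_hop_length = 256 samples, so every
    # latent frame is stretched into 1024 / 256 = 4 vocoder frames. The HiFi-GAN
    # upsample rates 8 * 8 * 2 * 2 = 256 then expand each vocoder frame back into
    # 256 samples, and the optional second interpolation rescales the sequence by
    # output_sample_rate / input_sample_rate (24000 / 22050).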

    @torch.inference_mode()
    def inference(self, c, g):
        """
        Args:
            c (Tensor): feature input tensor (GPT latent).
            g (Tensor): global conditioning input tensor.

        Returns:
            Tensor: output waveform.

        Shapes:
            c: [B, T, C]
            Tensor: [B, 1, T]
        """
        return self.forward(c, g=g)

    def load_checkpoint(self, checkpoint_path, eval=False):  # pylint: disable=unused-argument, redefined-builtin
        state = load_fsspec(checkpoint_path, map_location=torch.device("cpu"))
        # remove unused keys
        state = state["model"]
        states_keys = list(state.keys())
        for key in states_keys:
            if "waveform_decoder." not in key and "speaker_encoder." not in key:
                del state[key]

        self.load_state_dict(state)
        if eval:
            self.eval()
            assert not self.training
            self.waveform_decoder.remove_weight_norm()
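

if __name__ == "__main__":
    # Minimal usage sketch, not part of the upstream module: it builds the
    # decoder with its defaults and runs it on random tensors purely to show
    # the expected tensor shapes. A real pipeline would load trained weights
    # first and derive `g` from `speaker_encoder`; the checkpoint path below
    # is a hypothetical placeholder.
    decoder = HifiDecoder()
    # decoder.load_checkpoint("/path/to/decoder_checkpoint.pth", eval=True)  # hypothetical path
    latents = torch.randn(2, 10, 1024)  # [B, T, C] GPT latent frames
    d_vector = torch.randn(2, 512, 1)  # [B, d_vector_dim, 1] speaker conditioning
    wav = decoder.inference(latents, g=d_vector)
    print(wav.shape)  # [B, 1, T_samples] at output_sample_rate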