Spaces: Running on Zero
Hecheng0625 committed · Commit b38e3ae · Parent(s): 4dfbe06
Update Amphion/models/ns3_codec/facodec.py

Amphion/models/ns3_codec/facodec.py CHANGED
@@ -591,3 +591,173 @@ class FACodecDecoder(nn.Module):
 
     def reset_parameters(self):
         self.apply(init_weights)
+
+
+class FACodecRedecoder(nn.Module):
+    def __init__(
+        self,
+        in_channels=256,
+        upsample_initial_channel=1280,
+        up_ratios=(5, 5, 4, 2),
+        vq_num_q_c=2,
+        vq_num_q_p=1,
+        vq_num_q_r=3,
+        vq_dim=256,
+        codebook_size_prosody=10,
+        codebook_size_content=10,
+        codebook_size_residual=10,
+    ):
+        super().__init__()
+        self.hop_length = np.prod(up_ratios)
+        self.up_ratios = up_ratios
+
+        self.vq_num_q_p = vq_num_q_p
+        self.vq_num_q_c = vq_num_q_c
+        self.vq_num_q_r = vq_num_q_r
+
+        self.vq_dim = vq_dim
+
+        self.codebook_size_prosody = codebook_size_prosody
+        self.codebook_size_content = codebook_size_content
+        self.codebook_size_residual = codebook_size_residual
+
+        self.prosody_embs = nn.ModuleList()
+        for i in range(self.vq_num_q_p):
+            emb_tokens = nn.Embedding(
+                num_embeddings=2**self.codebook_size_prosody,
+                embedding_dim=self.vq_dim,
+            )
+            emb_tokens.weight.data.normal_(mean=0.0, std=1e-5)
+            self.prosody_embs.append(emb_tokens)
+        self.content_embs = nn.ModuleList()
+        for i in range(self.vq_num_q_c):
+            emb_tokens = nn.Embedding(
+                num_embeddings=2**self.codebook_size_content,
+                embedding_dim=self.vq_dim,
+            )
+            emb_tokens.weight.data.normal_(mean=0.0, std=1e-5)
+            self.content_embs.append(emb_tokens)
+        self.residual_embs = nn.ModuleList()
+        for i in range(self.vq_num_q_r):
+            emb_tokens = nn.Embedding(
+                num_embeddings=2**self.codebook_size_residual,
+                embedding_dim=self.vq_dim,
+            )
+            emb_tokens.weight.data.normal_(mean=0.0, std=1e-5)
+            self.residual_embs.append(emb_tokens)
+
+        # Add first conv layer
+        channels = upsample_initial_channel
+        layers = [WNConv1d(in_channels, channels, kernel_size=7, padding=3)]
+
+        # Add upsampling + MRF blocks
+        for i, stride in enumerate(up_ratios):
+            input_dim = channels // 2**i
+            output_dim = channels // 2 ** (i + 1)
+            layers += [DecoderBlock(input_dim, output_dim, stride)]
+
+        # Add final conv layer
+        layers += [
+            Activation1d(activation=SnakeBeta(output_dim, alpha_logscale=True)),
+            WNConv1d(output_dim, 1, kernel_size=7, padding=3),
+            nn.Tanh(),
+        ]
+
+        self.model = nn.Sequential(*layers)
+
+        self.timbre_linear = nn.Linear(in_channels, in_channels * 2)
+        self.timbre_linear.bias.data[:in_channels] = 1
+        self.timbre_linear.bias.data[in_channels:] = 0
+        self.timbre_norm = nn.LayerNorm(in_channels, elementwise_affine=False)
+
+        self.timbre_cond_prosody_enc = TransformerEncoder(
+            enc_emb_tokens=None,
+            encoder_layer=4,
+            encoder_hidden=256,
+            encoder_head=4,
+            conv_filter_size=1024,
+            conv_kernel_size=5,
+            encoder_dropout=0.1,
+            use_cln=True,
+            cfg=None,
+        )
+
+    def forward(
+        self,
+        vq,
+        speaker_embedding,
+        use_residual_code=False,
+    ):
+
+        x = 0
+
+        x_p = 0
+        for i in range(self.vq_num_q_p):
+            x_p = x_p + self.prosody_embs[i](vq[i])  # (B, T, d)
+        spk_cond = speaker_embedding.unsqueeze(1).expand(-1, x_p.shape[1], -1)
+        x_p = self.timbre_cond_prosody_enc(
+            x_p, key_padding_mask=None, condition=spk_cond
+        )
+        x = x + x_p
+
+        x_c = 0
+        for i in range(self.vq_num_q_c):
+            x_c = x_c + self.content_embs[i](vq[self.vq_num_q_p + i])
+
+        x = x + x_c
+
+        if use_residual_code:
+
+            x_r = 0
+            for i in range(self.vq_num_q_r):
+                x_r = x_r + self.residual_embs[i](
+                    vq[self.vq_num_q_p + self.vq_num_q_c + i]
+                )
+            x = x + x_r
+
+        style = self.timbre_linear(speaker_embedding).unsqueeze(2)  # (B, 2d, 1)
+        gamma, beta = style.chunk(2, 1)  # (B, d, 1)
+        x = x.transpose(1, 2)
+        x = self.timbre_norm(x)
+        x = x.transpose(1, 2)
+        x = x * gamma + beta
+        x = self.model(x)
+
+        return x
+
+    def vq2emb(self, vq, speaker_embedding, use_residual=True):
+
+        out = 0
+
+        x_t = 0
+        for i in range(self.vq_num_q_p):
+            x_t += self.prosody_embs[i](vq[i])  # (B, T, d)
+        spk_cond = speaker_embedding.unsqueeze(1).expand(-1, x_t.shape[1], -1)
+        x_t = self.timbre_cond_prosody_enc(
+            x_t, key_padding_mask=None, condition=spk_cond
+        )
+
+        # prosody
+        out += x_t
+
+        # content
+        for i in range(self.vq_num_q_c):
+            out += self.content_embs[i](vq[self.vq_num_q_p + i])
+
+        # residual
+        if use_residual:
+            for i in range(self.vq_num_q_r):
+                out += self.residual_embs[i](vq[self.vq_num_q_p + self.vq_num_q_c + i])
+
+        out = out.transpose(1, 2)  # (B, T, d) -> (B, d, T)
+        return out
+
+    def inference(self, x, speaker_embedding):
+        style = self.timbre_linear(speaker_embedding).unsqueeze(2)  # (B, 2d, 1)
+        gamma, beta = style.chunk(2, 1)  # (B, d, 1)
+        x = x.transpose(1, 2)
+        x = self.timbre_norm(x)
+        x = x.transpose(1, 2)
+        x = x * gamma + beta
+        x = self.model(x)
+        return x
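
A minimal shape-check sketch for the FACodecRedecoder added in this commit (not part of the diff itself). It assumes the import path mirrors this Space's file layout (Amphion/models/ns3_codec/facodec.py), that the code tensor stacks quantizer indices as (n_q, B, T) with the prosody codebooks first, then content, then residual (as indexed in forward and vq2emb), and that the speaker embedding has in_channels = 256 dimensions. The vq2emb + inference pair used here follows the class's own path from tokens to a latent and then to a waveform; all tensors below are dummy placeholders.

import torch
from Amphion.models.ns3_codec.facodec import FACodecRedecoder  # assumed import path for this Space

redecoder = FACodecRedecoder()  # defaults: 1 prosody + 2 content + 3 residual quantizers
redecoder.eval()

B, T = 1, 120  # batch size and number of code frames (arbitrary for this sketch)
n_q = redecoder.vq_num_q_p + redecoder.vq_num_q_c + redecoder.vq_num_q_r  # 6 with defaults
vq = torch.randint(0, 2**redecoder.codebook_size_prosody, (n_q, B, T))  # dummy token ids; every default codebook has 2**10 = 1024 entries
spk = torch.randn(B, 256)  # dummy speaker embedding, matching in_channels

with torch.no_grad():
    emb = redecoder.vq2emb(vq, spk, use_residual=True)  # (B, 256, T) latent
    wav = redecoder.inference(emb, spk)  # (B, 1, T * hop_length), hop_length = 5*5*4*2 = 200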