# ############################################################################
# Hyperparameters for a Transformer-based TTS model (SpeechBrain HyperPyYAML).
#
# NOTE(review): this file had been flattened onto a single line and every
# `!ref` tag had lost its `<target>` (likely stripped as markup). The targets
# below are reconstructed from the adjacent key names and the SpeechBrain
# conventions (`!ref <key>` resolves to a key defined earlier in this file).
# Verify against the original recipe before training.
# ############################################################################

################################
#     Audio Parameters         #
################################
sample_rate: 22050
hop_length: 256
win_length: 1024
n_mel_channels: 80
n_fft: 1024
mel_fmin: 0.0
mel_fmax: 8000.0
power: 1
normalized: False
min_max_energy_norm: True
norm: "slaney"
mel_scale: "slaney"
dynamic_range_compression: True
mel_normalized: False
min_f0: 65  # (torchaudio pyin values)
max_f0: 2093  # (torchaudio pyin values)
positive_weight: 5.0

# Phoneme inventory (ARPAbet) plus a trailing space token.
lexicon:
  - AA
  - AE
  - AH
  - AO
  - AW
  - AY
  - B
  - CH
  - D
  - DH
  - EH
  - ER
  - EY
  - F
  - G
  - HH
  - IH
  - IY
  - JH
  - K
  - L
  - M
  - N
  - NG
  - OW
  - OY
  - P
  - R
  - S
  - SH
  - T
  - TH
  - UH
  - UW
  - V
  - W
  - Y
  - Z
  - ZH
  - ' '

n_symbols: 42  # fixed depending on symbols in the lexicon +1 for a dummy symbol used for padding
padding_idx: 0

# Define model architecture
d_model: 512
nhead: 8
num_encoder_layers: 6
num_decoder_layers: 6
dim_feedforward: 2048
dropout: 0.2

blank_index: 0  # This special token is for padding
bos_index: 1
eos_index: 2

stop_weight: 0.45
stop_threshold: 0.5

################### PRENET #######################
enc_pre_net: !new:models.EncoderPrenet

dec_pre_net: !new:models.DecoderPrenet

encoder_emb: !new:torch.nn.Embedding
  num_embeddings: 128
  embedding_dim: !ref <d_model>
  padding_idx: !ref <padding_idx>

pos_emb_enc: !new:models.ScaledPositionalEncoding
  d_model: !ref <d_model>

# Defined but currently unused: excluded from `modules:` (commented out there)
# and from the checkpointed ModuleList below.
decoder_emb: !new:torch.nn.Embedding
  num_embeddings: 128
  embedding_dim: !ref <d_model>
  padding_idx: !ref <padding_idx>

pos_emb_dec: !new:models.ScaledPositionalEncoding
  d_model: !ref <d_model>

Seq2SeqTransformer: !new:torch.nn.Transformer
  d_model: !ref <d_model>
  nhead: !ref <nhead>
  num_encoder_layers: !ref <num_encoder_layers>
  num_decoder_layers: !ref <num_decoder_layers>
  dim_feedforward: !ref <dim_feedforward>
  dropout: !ref <dropout>
  batch_first: True

postnet: !new:models.PostNet
  mel_channels: !ref <n_mel_channels>
  postnet_channels: 512
  kernel_size: 5
  postnet_layers: 5

# Projects decoder output to mel-spectrogram frames.
mel_lin: !new:speechbrain.nnet.linear.Linear
  input_size: !ref <d_model>
  n_neurons: !ref <n_mel_channels>

# Projects decoder output to a scalar stop-token logit per frame.
stop_lin: !new:speechbrain.nnet.linear.Linear
  input_size: !ref <d_model>
  n_neurons: 1

# NOTE: "mel_spectogram" (sic) is the actual function name in SpeechBrain.
mel_spec_feats: !name:speechbrain.lobes.models.FastSpeech2.mel_spectogram
  sample_rate: !ref <sample_rate>
  hop_length: !ref <hop_length>
  win_length: !ref <win_length>
  n_fft: !ref <n_fft>
  n_mels: !ref <n_mel_channels>
  f_min: !ref <mel_fmin>
  f_max: !ref <mel_fmax>
  power: !ref <power>
  normalized: !ref <normalized>
  min_max_energy_norm: !ref <min_max_energy_norm>
  norm: !ref <norm>
  mel_scale: !ref <mel_scale>
  compression: !ref <dynamic_range_compression>

modules:
  enc_pre_net: !ref <enc_pre_net>
  encoder_emb: !ref <encoder_emb>
  pos_emb_enc: !ref <pos_emb_enc>
  dec_pre_net: !ref <dec_pre_net>
  # decoder_emb: !ref <decoder_emb>
  pos_emb_dec: !ref <pos_emb_dec>
  Seq2SeqTransformer: !ref <Seq2SeqTransformer>
  postnet: !ref <postnet>
  mel_lin: !ref <mel_lin>
  stop_lin: !ref <stop_lin>
  model: !ref <model>

lookahead_mask: !name:speechbrain.lobes.models.transformer.Transformer.get_lookahead_mask
padding_mask: !name:speechbrain.lobes.models.transformer.Transformer.get_key_padding_mask

# Single ModuleList gathering all trainable components (9 entries in the
# original flattened source; decoder_emb is deliberately absent).
model: !new:torch.nn.ModuleList
  - [!ref <enc_pre_net>, !ref <encoder_emb>, !ref <pos_emb_enc>,
     !ref <dec_pre_net>, !ref <pos_emb_dec>, !ref <Seq2SeqTransformer>,
     !ref <postnet>, !ref <mel_lin>, !ref <stop_lin>]

label_encoder: !new:speechbrain.dataio.encoder.TextEncoder

# Base directory for pretrained artifacts (Colab-style path).
pretrained_path: /content/

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
  loadables:
    model: !ref <model>
    label_encoder: !ref <label_encoder>
  paths:
    model: !ref <pretrained_path>/model.ckpt
    label_encoder: !ref <pretrained_path>/label_encoder.txt