# ################################
# Model: Transducer ASR
# Augmentation: SpecAugment
# Authors: Pooneh Mousavi 2023
# ################################
# Feature parameters (FBANKS etc)
sample_rate: 16000
n_fft: 400
n_mels: 80
# Model parameters
activation: !name:torch.nn.LeakyReLU
dropout: 0.15
cnn_blocks: 3
cnn_channels: (128, 200, 256)
inter_layer_pooling_size: (2, 2, 2)
cnn_kernelsize: (3, 3)
time_pooling_size: 4
rnn_class: !name:speechbrain.nnet.RNN.LSTM
rnn_layers: 5
rnn_neurons: 1024
rnn_bidirectional: True
dnn_blocks: 2
dnn_neurons: 1024
dec_neurons: 1024
joint_dim: 1024
# Outputs
output_neurons: 1000 # BPE size, index(blank/eos/bos) = 0
# Decoding parameters
# Be sure that the bos and eos indexes match those of the BPE model
blank_index: 0
bos_index: 0
eos_index: 0
min_decode_ratio: 0.0
max_decode_ratio: 1.0
beam_size: 4
nbest: 1
# by default {state,expand}_beam = 2.3, as mentioned in the paper
# https://arxiv.org/abs/1904.02619
state_beam: 2.3
expand_beam: 2.3
transducer_beam_search: True
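
# Feature normalization with global mean/variance statistics
# (the statistics themselves are loaded by the pretrainer below).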
normalizer: !new:speechbrain.processing.features.InputNormalization
    norm_type: global
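
# Log Mel filterbank feature extraction (80 mels, 400-point FFT at 16 kHz).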
compute_features: !new:speechbrain.lobes.features.Fbank
    sample_rate: !ref <sample_rate>
    n_fft: !ref <n_fft>
    n_mels: !ref <n_mels>
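
# Transcription (encoder) network: CRDNN = CNN blocks + bidirectional LSTM + DNN blocks.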
enc: !new:speechbrain.lobes.models.CRDNN.CRDNN
    input_shape: [null, null, !ref <n_mels>]
    activation: !ref <activation>
    dropout: !ref <dropout>
    cnn_blocks: !ref <cnn_blocks>
    cnn_channels: !ref <cnn_channels>
    cnn_kernelsize: !ref <cnn_kernelsize>
    inter_layer_pooling_size: !ref <inter_layer_pooling_size>
    time_pooling: True
    using_2d_pooling: False
    time_pooling_size: !ref <time_pooling_size>
    rnn_class: !ref <rnn_class>
    rnn_layers: !ref <rnn_layers>
    rnn_neurons: !ref <rnn_neurons>
    rnn_bidirectional: !ref <rnn_bidirectional>
    rnn_re_init: True
    dnn_blocks: !ref <dnn_blocks>
    dnn_neurons: !ref <dnn_neurons>
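
# Projects the encoder output to the joint-network dimension.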
enc_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <dnn_neurons>
    n_neurons: !ref <joint_dim>
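
# Prediction-network input: one-hot embedding of previously emitted tokens
# (the blank index is skipped, hence <output_neurons> - 1 inputs below).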
emb: !new:speechbrain.nnet.embedding.Embedding
    num_embeddings: !ref <output_neurons>
    consider_as_one_hot: True
    blank_id: !ref <blank_index>
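
# Prediction (decoder) network: single-layer GRU over previously emitted tokens.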
dec: !new:speechbrain.nnet.RNN.GRU
    input_shape: [null, null, !ref <output_neurons> - 1]
    hidden_size: !ref <dec_neurons>
    num_layers: 1
    re_init: True

# For MTL with LM over the decoder
dec_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <dec_neurons>
    n_neurons: !ref <joint_dim>
    bias: False
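
# Joint network: combines the encoder and prediction-network outputs (by sum)
# and applies the nonlinearity.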
Tjoint: !new:speechbrain.nnet.transducer.transducer_joint.Transducer_joint
    joint: sum  # joint [sum | concat]
    nonlinearity: !ref <activation>
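
# Final projection from the joint dimension to the output units (BPE tokens + blank).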
transducer_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <joint_dim>
    n_neurons: !ref <output_neurons>
    bias: False
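
# Log-softmax over the output units.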
log_softmax: !new:speechbrain.nnet.activations.Softmax
    apply_log: True
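
# Trainable modules whose weights are restored from the pretrained checkpoint
# (see the pretrainer's `asr` entry below).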
asr_model: !new:torch.nn.ModuleList
    - [!ref <enc>, !ref <emb>, !ref <dec>, !ref <transducer_lin>]
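
# SentencePiece BPE tokenizer (1000 units); its model file is loaded by the pretrainer.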
tokenizer: !new:sentencepiece.SentencePieceProcessor
# We compose the inference (encoder) pipeline.
encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
    input_shape: [null, null, !ref <n_mels>]
    compute_features: !ref <compute_features>
    normalize: !ref <normalizer>
    model: !ref <enc>
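
# Transducer beam search over the prediction network and joint network;
# returns the <nbest> best hypotheses.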
decoder: !new:speechbrain.decoders.transducer.TransducerBeamSearcher
    decode_network_lst: [!ref <emb>, !ref <dec>]
    tjoint: !ref <Tjoint>
    classifier_network: [!ref <transducer_lin>]
    blank_id: !ref <blank_index>
    beam_size: !ref <beam_size>
    nbest: !ref <nbest>
    state_beam: !ref <state_beam>
    expand_beam: !ref <expand_beam>
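
# Modules exposed to the pretrained inference interface (an interface such as
# speechbrain.inference.ASR.EncoderDecoderASR expects `encoder` and `decoder`).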
modules:
    normalizer: !ref <normalizer>
    encoder: !ref <encoder>
    decoder: !ref <decoder>
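
# Downloads and loads the pretrained normalizer statistics, ASR model weights,
# and tokenizer model into the objects defined above.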
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        normalizer: !ref <normalizer>
        asr: !ref <asr_model>
        tokenizer: !ref <tokenizer>
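
# ################################
# Usage sketch (not part of the configuration). Assuming this file is the
# hyperparams.yaml of a published SpeechBrain model directory or Hub repo,
# it is typically consumed through the pretrained inference interface; the
# `source` below is a placeholder, not a real model id:
#
#   from speechbrain.inference.ASR import EncoderDecoderASR  # speechbrain.pretrained in older releases
#   asr = EncoderDecoderASR.from_hparams(
#       source="<model-dir-or-hub-id>",            # placeholder
#       savedir="pretrained_models/asr-transducer",
#   )
#   print(asr.transcribe_file("example.wav"))      # path to an audio file
# ################################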