# ############################################################################
# Model: E2E ASR with Transformer
# Encoder: Transformer Encoder
# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM
# Tokens: unigram
# losses: CTC + KLdiv (Label Smoothing loss)
# Training: Librispeech 960h
# Authors:  Jianyuan Zhong, Titouan Parcollet 2021
# ############################################################################

# Feature parameters
sample_rate: 16000
n_fft: 400
n_mels: 80

####################### Model parameters ###########################
# Transformer
d_model: 768
nhead: 8
num_encoder_layers: 12
num_decoder_layers: 6
d_ffn: 3072
transformer_dropout: 0.0
activation: !name:torch.nn.GELU
output_neurons: 5000
vocab_size: 5000

# Outputs
blank_index: 0
label_smoothing: 0.1
pad_index: 0
bos_index: 1
eos_index: 2
unk_index: 0

# Decoding parameters
min_decode_ratio: 0.0
max_decode_ratio: 1.0
valid_search_interval: 10
valid_beam_size: 10
test_beam_size: 10
lm_weight: 0.60
ctc_weight_decode: 0.52
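# Note (not in the original recipe): the valid_* settings are only used by the
# training recipe, which typically runs full beam-search validation every
# valid_search_interval epochs with valid_beam_size. The decoder defined below
# consumes test_beam_size, ctc_weight_decode and lm_weight.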
############################## models ################################

CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd
    input_shape: (8, 10, 80)
    num_blocks: 3
    num_layers_per_block: 1
    out_channels: (128, 256, 512)
    kernel_sizes: (3, 3, 1)
    strides: (2, 2, 1)
    residuals: (False, False, False)
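# Shape sketch (added note): the two stride-2 blocks downsample both time and
# the 80 Mel bins by a factor of 4, so each frame leaving the CNN has roughly
# (80 / 4) = 20 frequency bins x 512 channels = 10240 features, which is the
# input_size given to the Transformer below. input_shape is only an example
# (batch, time, features) shape used to infer layer dimensions at build time.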
Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR
    input_size: 10240
    tgt_vocab: !ref <output_neurons>
    d_model: !ref <d_model>
    nhead: !ref <nhead>
    num_encoder_layers: !ref <num_encoder_layers>
    num_decoder_layers: !ref <num_decoder_layers>
    d_ffn: !ref <d_ffn>
    dropout: !ref <transformer_dropout>
    activation: !ref <activation>
    normalize_before: False

ctc_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <d_model>
    n_neurons: !ref <output_neurons>

seq_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <d_model>
    n_neurons: !ref <output_neurons>

decoder: !new:speechbrain.decoders.S2STransformerBeamSearch
    modules: [!ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]
    bos_index: !ref <bos_index>
    eos_index: !ref <eos_index>
    blank_index: !ref <blank_index>
    min_decode_ratio: !ref <min_decode_ratio>
    max_decode_ratio: !ref <max_decode_ratio>
    beam_size: !ref <test_beam_size>
    ctc_weight: !ref <ctc_weight_decode>
    lm_weight: !ref <lm_weight>
    lm_modules: !ref <lm_model>
    temperature: 1.15
    temperature_lm: 1.15
    using_eos_threshold: False
    length_normalization: True
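# Rough sketch of how this searcher scores a partial hypothesis y (the exact
# implementation lives in speechbrain.decoders):
#   score(y) ~ (1 - ctc_weight) * log p_att(y) + ctc_weight * log p_ctc(y)
#              + lm_weight * log p_lm(y)
# i.e. joint CTC/attention decoding with shallow LM fusion; the temperatures
# flatten the attention and LM distributions before they are combined.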
log_softmax: !new:torch.nn.LogSoftmax
    dim: -1

normalizer: !new:speechbrain.processing.features.InputNormalization
    norm_type: global

compute_features: !new:speechbrain.lobes.features.Fbank
    sample_rate: !ref <sample_rate>
    n_fft: !ref <n_fft>
    n_mels: !ref <n_mels>
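# With sample_rate 16000 and n_fft 400, the analysis window covers 400 samples
# (25 ms), and Fbank yields 80-dimensional log Mel filterbank features,
# matching the n_mels expected by the encoder pipeline defined below.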
# This is the Transformer LM used by the pretrained model on the HuggingFace
# repository. See the HuggingFace model corresponding to
# pretrained_lm_tokenizer_path for more details about the model.
# NB: it has to match the pre-trained TransformerLM!
lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM
    vocab: 5000
    d_model: 768
    nhead: 12
    num_encoder_layers: 12
    num_decoder_layers: 0
    d_ffn: 3072
    dropout: 0.0
    activation: !name:torch.nn.GELU
    normalize_before: False
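# With num_decoder_layers: 0 this LM is effectively a causal (decoder-only)
# Transformer built from 12 self-attention layers; its weights are loaded by
# the pretrainer below and must match this architecture exactly.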
tokenizer: !new:sentencepiece.SentencePieceProcessor
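# The SentencePiece model (unigram, 5000 tokens) is not embedded here: the
# processor is created empty and its model file is expected to be loaded via
# the pretrainer's "tokenizer" entry below.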
Tencoder: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper
    transformer: !ref <Transformer>

encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
    input_shape: [null, null, !ref <n_mels>]
    compute_features: !ref <compute_features>
    normalize: !ref <normalizer>
    cnn: !ref <CNN>
    transformer_encoder: !ref <Tencoder>
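# Encoder-side inference pipeline: raw waveform -> Fbank features -> global
# input normalization -> CNN front-end (4x downsampling) -> Transformer
# encoder. The decoder object defined above then runs the joint CTC/attention
# beam search over this encoder output.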
# Models
asr_model: !new:torch.nn.ModuleList
    - [!ref <CNN>, !ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]

modules:
   compute_features: !ref <compute_features>
   normalizer: !ref <normalizer>
   pre_transformer: !ref <CNN>
   transformer: !ref <Transformer>
   asr_model: !ref <asr_model>
   lm_model: !ref <lm_model>
   encoder: !ref <encoder>
   decoder: !ref <decoder>

# The pretrainer allows a mapping between pretrained files and instances that
# are declared in the yaml.
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
   loadables:
      normalizer: !ref <normalizer>
      asr: !ref <asr_model>
      lm: !ref <lm_model>
      tokenizer: !ref <tokenizer>
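# Typical usage (a minimal sketch, not part of this file): a hyperparams file
# like this one is usually consumed through the SpeechBrain pretrained
# interface, roughly as follows. The source path is only a placeholder for a
# directory or HuggingFace repo containing this yaml and the checkpoints.
#
#   from speechbrain.pretrained import EncoderDecoderASR
#
#   asr = EncoderDecoderASR.from_hparams(
#       source="<path-or-HF-repo-with-this-yaml-and-checkpoints>",
#       savedir="pretrained_model",
#   )
#   print(asr.transcribe_file("example.wav"))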