Adel-Moumen committed on
Commit 315aa11
1 Parent(s): c6d256b

Update hyperparams_develop.yaml

Files changed (1)
  1. hyperparams_develop.yaml +135 -0
hyperparams_develop.yaml CHANGED
@@ -0,0 +1,135 @@
# ############################################################################
# Model: E2E ASR with Transformer
# Encoder: Transformer Encoder
# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch
# Tokens: BPE with unigram
# losses: CTC + KLdiv (Label Smoothing loss)
# Training: AISHELL-1
# Authors: Jianyuan Zhong, Titouan Parcollet
# ############################################################################

# Feature parameters
sample_rate: 16000
n_fft: 400
n_mels: 80

####################### Model parameters ###########################
# Transformer
d_model: 256
nhead: 4
num_encoder_layers: 12
num_decoder_layers: 6
d_ffn: 2048
transformer_dropout: 0.1
activation: !name:torch.nn.GELU
output_neurons: 5000
vocab_size: 5000

# Outputs
blank_index: 0
label_smoothing: 0.1
pad_index: 0
bos_index: 1
eos_index: 2
unk_index: 0

# Decoding parameters
min_decode_ratio: 0.0
max_decode_ratio: 1.0
valid_search_interval: 10
valid_beam_size: 10
test_beam_size: 10
ctc_weight_decode: 0.40

############################## models ################################

compute_features: !new:speechbrain.lobes.features.Fbank
    sample_rate: !ref <sample_rate>
    n_fft: !ref <n_fft>
    n_mels: !ref <n_mels>

normalizer: !new:speechbrain.processing.features.InputNormalization
    norm_type: global

CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd
    input_shape: (8, 10, 80)
    num_blocks: 2
    num_layers_per_block: 1
    out_channels: (256, 256)
    kernel_sizes: (3, 3)
    strides: (2, 2)
    residuals: (False, False)
    norm: !name:speechbrain.nnet.normalization.BatchNorm2d

Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length
    input_size: 5120
    tgt_vocab: !ref <output_neurons>
    d_model: !ref <d_model>
    nhead: !ref <nhead>
    num_encoder_layers: !ref <num_encoder_layers>
    num_decoder_layers: !ref <num_decoder_layers>
    d_ffn: !ref <d_ffn>
    dropout: !ref <transformer_dropout>
    activation: !ref <activation>
    normalize_before: True

ctc_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <d_model>
    n_neurons: !ref <output_neurons>

seq_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <d_model>
    n_neurons: !ref <output_neurons>

tokenizer: !new:sentencepiece.SentencePieceProcessor

asr_model: !new:torch.nn.ModuleList
    - [!ref <CNN>, !ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]

# Here, we extract the encoder from the Transformer model
Tencoder: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper
    transformer: !ref <Transformer>

# We compose the inference (encoder) pipeline.
encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
    input_shape: [null, null, !ref <n_mels>]
    compute_features: !ref <compute_features>
    normalize: !ref <normalizer>
    cnn: !ref <CNN>
    transformer_encoder: !ref <Tencoder>

ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer
    eos_index: !ref <eos_index>
    blank_index: !ref <blank_index>
    ctc_fc: !ref <ctc_lin>

scorer: !new:speechbrain.decoders.scorer.ScorerBuilder
    full_scorers: [!ref <ctc_scorer>]
    weights:
        ctc: !ref <ctc_weight_decode>

decoder: !new:speechbrain.decoders.S2STransformerBeamSearcher
    modules: [!ref <Transformer>, !ref <seq_lin>]
    bos_index: !ref <bos_index>
    eos_index: !ref <eos_index>
    min_decode_ratio: !ref <min_decode_ratio>
    max_decode_ratio: !ref <max_decode_ratio>
    beam_size: !ref <test_beam_size>
    using_eos_threshold: False
    length_normalization: True
    scorer: !ref <scorer>

modules:
    normalizer: !ref <normalizer>
    encoder: !ref <encoder>
    decoder: !ref <decoder>

log_softmax: !new:torch.nn.LogSoftmax
    dim: -1

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        normalizer: !ref <normalizer>
        asr: !ref <asr_model>
        tokenizer: !ref <tokenizer>
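
A config structured like this (modules, tokenizer, pretrainer) is normally consumed through SpeechBrain's pretrained-model interface: from_hparams downloads the YAML together with the checkpoints listed under pretrainer, instantiates the objects, and exposes the normalizer/encoder/decoder modules plus the tokenizer for inference. The sketch below is a minimal, illustrative usage example, not part of this commit; the Hugging Face repository id and audio path are placeholders, and on SpeechBrain releases before 1.0 the class is imported from speechbrain.pretrained rather than speechbrain.inference.ASR.

# Minimal usage sketch for a hyperparams file like the one above.
# Assumptions: repo id and wav path are placeholders; SpeechBrain >= 1.0 import path.
from speechbrain.inference.ASR import EncoderDecoderASR

asr_model = EncoderDecoderASR.from_hparams(
    source="speechbrain/asr-transformer-aishell",        # placeholder repo id
    hparams_file="hyperparams_develop.yaml",              # this file
    savedir="pretrained_models/asr-transformer-aishell",  # local cache dir
)

# from_hparams runs the 'pretrainer' entry, fetching the normalizer, asr_model,
# and tokenizer checkpoints, then wires up the 'modules' (normalizer/encoder/decoder).
print(asr_model.transcribe_file("path/to/audio.wav"))

Note that the decoder entry already performs joint CTC/attention beam search: the ScorerBuilder mixes the attention decoder scores with the CTC scorer using ctc_weight_decode (0.40), so no extra decoding options need to be passed at inference time.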