Adel-Moumen committed on
Commit dee4160
1 Parent(s): ee66d28

Delete hyperparams_develop.yaml

Files changed (1)
  1. hyperparams_develop.yaml +0 -162
hyperparams_develop.yaml DELETED
@@ -1,162 +0,0 @@
- # ############################################################################
- # Model: E2E ASR with Transformer
- # Encoder: Conformer Encoder
- # Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch + TransformerLM
- # Tokens: unigram
- # losses: CTC + KLdiv (Label Smoothing loss)
- # Training: KsponSpeech 965.2h
- # Based on the works of: Jianyuan Zhong, Titouan Parcollet 2021
- # Authors: Dongwon Kim, Dongwoo Kim 2021
- # ############################################################################
- # Seed needs to be set at top of yaml, before objects with parameters are made
-
- # Feature parameters
- sample_rate: 16000
- n_fft: 400
- n_mels: 40
-
- ####################### Model parameters ###########################
- # Transformer
- d_model: 256
- nhead: 4
- num_encoder_layers: 12
- num_decoder_layers: 6
- d_ffn: 2048
- transformer_dropout: 0.1
- activation: !name:torch.nn.GELU
- output_neurons: 5000
- vocab_size: 5000
-
- # Outputs
- blank_index: 0
- pad_index: 0
- bos_index: 1
- eos_index: 2
- unk_index: 0
-
- # Decoding parameters
- min_decode_ratio: 0.0
- max_decode_ratio: 1.0
- test_beam_size: 66
- lm_weight: 0.60
- ctc_weight_decode: 0.50
-
- ############################## models ################################
-
- normalizer: !new:speechbrain.processing.features.InputNormalization
-     norm_type: global
-
- CNN: !new:speechbrain.lobes.models.convolution.ConvolutionFrontEnd
-     input_shape: (8, 10, 80)
-     num_blocks: 3
-     num_layers_per_block: 1
-     out_channels: (64, 64, 64)
-     kernel_sizes: (5, 5, 1)
-     strides: (2, 2, 1)
-     residuals: (False, False, True)
-
- Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR # yamllint disable-line rule:line-length
-     input_size: 1280
-     tgt_vocab: !ref <output_neurons>
-     d_model: !ref <d_model>
-     nhead: !ref <nhead>
-     num_encoder_layers: !ref <num_encoder_layers>
-     num_decoder_layers: !ref <num_decoder_layers>
-     d_ffn: !ref <d_ffn>
-     dropout: !ref <transformer_dropout>
-     activation: !ref <activation>
-     encoder_module: conformer
-     attention_type: RelPosMHAXL
-     normalize_before: True
-     causal: False
-
- # NB: It has to match the pre-trained TransformerLM!!
- lm_model: !new:speechbrain.lobes.models.transformer.TransformerLM.TransformerLM # yamllint disable-line rule:line-length
-     vocab: !ref <output_neurons>
-     d_model: 768
-     nhead: 12
-     num_encoder_layers: 12
-     num_decoder_layers: 0
-     d_ffn: 3072
-     dropout: 0.0
-     activation: !name:torch.nn.GELU
-     normalize_before: False
-
- tokenizer: !new:sentencepiece.SentencePieceProcessor
-
- ctc_lin: !new:speechbrain.nnet.linear.Linear
-     input_size: !ref <d_model>
-     n_neurons: !ref <output_neurons>
-
- seq_lin: !new:speechbrain.nnet.linear.Linear
-     input_size: !ref <d_model>
-     n_neurons: !ref <output_neurons>
-
- transformerlm_scorer: !new:speechbrain.decoders.scorer.TransformerLMScorer
-     language_model: !ref <lm_model>
-     temperature: 1.15
-
- ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer
-     eos_index: !ref <eos_index>
-     blank_index: !ref <blank_index>
-     ctc_fc: !ref <ctc_lin>
-
- scorer: !new:speechbrain.decoders.scorer.ScorerBuilder
-     full_scorers: [!ref <transformerlm_scorer>, !ref <ctc_scorer>]
-     weights:
-         transformerlm: !ref <lm_weight>
-         ctc: !ref <ctc_weight_decode>
-
- decoder: !new:speechbrain.decoders.S2STransformerBeamSearcher
-     modules: [!ref <Transformer>, !ref <seq_lin>]
-     bos_index: !ref <bos_index>
-     eos_index: !ref <eos_index>
-     min_decode_ratio: !ref <min_decode_ratio>
-     max_decode_ratio: !ref <max_decode_ratio>
-     beam_size: !ref <test_beam_size>
-     temperature: 1.15
-     using_eos_threshold: False
-     length_normalization: True
-     scorer: !ref <scorer>
-
-
- Tencoder: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper
-     transformer: !ref <Transformer>
-
- encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
-     input_shape: [null, null, !ref <n_mels>]
-     compute_features: !ref <compute_features>
-     normalize: !ref <normalizer>
-     cnn: !ref <CNN>
-     transformer_encoder: !ref <Tencoder>
-
- asr_model: !new:torch.nn.ModuleList
-     - [!ref <CNN>, !ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]
-
- log_softmax: !new:torch.nn.LogSoftmax
-     dim: -1
-
-
- compute_features: !new:speechbrain.lobes.features.Fbank
-     sample_rate: !ref <sample_rate>
-     n_fft: !ref <n_fft>
-     n_mels: !ref <n_mels>
-
- modules:
-     compute_features: !ref <compute_features>
-     normalizer: !ref <normalizer>
-     pre_transformer: !ref <CNN>
-     transformer: !ref <Transformer>
-     asr_model: !ref <asr_model>
-     lm_model: !ref <lm_model>
-     encoder: !ref <encoder>
-     decoder: !ref <decoder>
-
- # The pretrainer allows a mapping between pretrained files and instances that
- # are declared in the yaml.
- pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
-     loadables:
-         normalizer: !ref <normalizer>
-         asr: !ref <asr_model>
-         lm: !ref <lm_model>
-         tokenizer: !ref <tokenizer>
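
For context: a HyperPyYAML file like the one deleted above is loaded with hyperpyyaml, which instantiates every !new:/!name: object and resolves the !ref <...> placeholders against keys defined earlier in the file. Below is a minimal sketch, assuming the file is still available locally; note that the pretrainer above declares loadables but no paths, so a checkpoint source has to be supplied by the caller (the source string in the sketch is a placeholder, not something this commit defines).

    # Minimal sketch: load the hyperparameters file and fetch pretrained weights.
    # Assumes speechbrain (with hyperpyyaml and sentencepiece) is installed;
    # file names and the checkpoint source are illustrative placeholders.
    from hyperpyyaml import load_hyperpyyaml

    with open("hyperparams_develop.yaml") as fin:
        # Instantiates normalizer, CNN, Transformer, lm_model, decoder, scorer,
        # etc., resolving !ref placeholders as the file is parsed.
        hparams = load_hyperpyyaml(fin)

    # The Pretrainer maps checkpoint files onto the instantiated modules.
    # This YAML lists loadables (normalizer, asr, lm, tokenizer) but no paths,
    # so a default source (local dir or HF repo id) must be given here.
    pretrainer = hparams["pretrainer"]
    pretrainer.collect_files(default_source="dir-or-repo-with-checkpoints")  # placeholder
    pretrainer.load_collected()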
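In normal use, inference YAMLs like this are consumed through SpeechBrain's pretrained interface, which performs the load/collect steps above automatically; by default from_hparams reads hyperparams.yaml, and a different file can be selected with its hparams_file argument. A usage sketch follows; the repo id and audio path are assumptions for illustration, not confirmed by this commit.

    # Usage sketch: run the Conformer encoder + TransformerLM-rescored beam
    # search end to end. Source repo id and wav path are assumed placeholders.
    from speechbrain.pretrained import EncoderDecoderASR

    asr = EncoderDecoderASR.from_hparams(
        source="speechbrain/asr-conformer-transformerlm-ksponspeech",  # assumed repo id
        savedir="pretrained_models/asr-ksponspeech",
    )
    print(asr.transcribe_file("example.wav"))  # placeholder audio file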