patrickvonplaten committed
Commit c0d1392
1 Parent(s): bc210d8
config.json ADDED
@@ -0,0 +1,282 @@
+ {
+   "architectures": [
+     "SpeechEncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "facebook/bart-large",
+     "activation_dropout": 0.1,
+     "activation_function": "gelu",
+     "add_bias_logits": false,
+     "add_cross_attention": true,
+     "add_final_layer_norm": false,
+     "architectures": [
+       "BartModel"
+     ],
+     "attention_dropout": 0.1,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classif_dropout": 0.1,
+     "classifier_dropout": 0.0,
+     "cross_attention_hidden_size": null,
+     "d_model": 1024,
+     "decoder_attention_heads": 16,
+     "decoder_ffn_dim": 4096,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 12,
+     "decoder_start_token_id": 2,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.1,
+     "early_stopping": true,
+     "encoder_attention_heads": 16,
+     "encoder_ffn_dim": 4096,
+     "encoder_layerdrop": 0.0,
+     "encoder_layers": 12,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": 2,
+     "gradient_checkpointing": false,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1",
+       "2": "LABEL_2"
+     },
+     "init_std": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1,
+       "LABEL_2": 2
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 1024,
+     "min_length": 0,
+     "model_type": "bart",
+     "no_repeat_ngram_size": 3,
+     "normalize_before": false,
+     "num_beam_groups": 1,
+     "num_beams": 4,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_embedding": false,
+     "sep_token_id": null,
+     "task_specific_params": {
+       "summarization": {
+         "length_penalty": 1.0,
+         "max_length": 128,
+         "min_length": 12,
+         "num_beams": 4
+       },
+       "summarization_cnn": {
+         "length_penalty": 2.0,
+         "max_length": 142,
+         "min_length": 56,
+         "num_beams": 4
+       },
+       "summarization_xsum": {
+         "length_penalty": 1.0,
+         "max_length": 62,
+         "min_length": 11,
+         "num_beams": 6
+       }
+     },
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.15.0.dev0",
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 50265
+   },
+   "decoder_start_token_id": 0,
+   "encoder": {
+     "_name_or_path": "facebook/wav2vec2-large-lv60",
+     "activation_dropout": 0.1,
+     "adapter_kernel_size": 3,
+     "adapter_stride": 2,
+     "add_adapter": false,
+     "add_cross_attention": false,
+     "apply_spec_augment": true,
+     "architectures": [
+       "Wav2Vec2ForPreTraining"
+     ],
+     "attention_dropout": 0.1,
+     "bad_words_ids": null,
+     "bos_token_id": 1,
+     "chunk_size_feed_forward": 0,
+     "classifier_proj_size": 256,
+     "codevector_dim": 768,
+     "contrastive_logits_temperature": 0.1,
+     "conv_bias": true,
+     "conv_dim": [
+       512,
+       512,
+       512,
+       512,
+       512,
+       512,
+       512
+     ],
+     "conv_kernel": [
+       10,
+       3,
+       3,
+       3,
+       3,
+       2,
+       2
+     ],
+     "conv_stride": [
+       5,
+       2,
+       2,
+       2,
+       2,
+       2,
+       2
+     ],
+     "cross_attention_hidden_size": null,
+     "ctc_loss_reduction": "sum",
+     "ctc_zero_infinity": false,
+     "decoder_start_token_id": null,
+     "diversity_loss_weight": 0.1,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "do_stable_layer_norm": true,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "feat_extract_activation": "gelu",
+     "feat_extract_dropout": 0.0,
+     "feat_extract_norm": "layer",
+     "feat_proj_dropout": 0.0,
+     "feat_quantizer_dropout": 0.0,
+     "final_dropout": 0.1,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout": 0.1,
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "layerdrop": 0.0,
+     "length_penalty": 1.0,
+     "mask_feature_length": 10,
+     "mask_feature_min_masks": 0,
+     "mask_feature_prob": 0.0,
+     "mask_time_length": 10,
+     "mask_time_min_masks": 2,
+     "mask_time_prob": 0.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "wav2vec2",
+     "no_repeat_ngram_size": 0,
+     "num_adapter_layers": 3,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_codevector_groups": 2,
+     "num_codevectors_per_group": 320,
+     "num_conv_pos_embedding_groups": 16,
+     "num_conv_pos_embeddings": 128,
+     "num_feat_extract_layers": 7,
+     "num_hidden_layers": 24,
+     "num_negatives": 100,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_size": 1024,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "prefix": null,
+     "problem_type": null,
+     "proj_codevector_dim": 768,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "tdnn_dilation": [
+       1,
+       2,
+       3,
+       1,
+       1
+     ],
+     "tdnn_dim": [
+       512,
+       512,
+       512,
+       512,
+       1500
+     ],
+     "tdnn_kernel": [
+       5,
+       3,
+       3,
+       1,
+       1
+     ],
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.15.0.dev0",
+     "use_bfloat16": false,
+     "use_weighted_layer_sum": false,
+     "vocab_size": 32,
+     "xvector_output_dim": 512
+   },
+   "eos_token_id": 2,
+   "is_encoder_decoder": true,
+   "max_length": 200,
+   "model_type": "speech-encoder-decoder",
+   "num_beams": 5,
+   "pad_token_id": 1,
+   "processor_class": "Wav2Vec2Processor",
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "use_cache": false
+ }
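The composite config above nests the full wav2vec2 encoder and BART decoder configs. As a quick sanity check it can be loaded and inspected once the repository files are available locally; a minimal sketch, assuming the files sit in the working directory and a recent `transformers` is installed:

from transformers import AutoConfig

# load the speech-encoder-decoder config written in config.json above
config = AutoConfig.from_pretrained("./")
print(config.model_type)          # speech-encoder-decoder
print(config.encoder.model_type)  # wav2vec2 (24 layers, hidden_size 1024)
print(config.decoder.model_type)  # bart (d_model 1024, vocab_size 50265)
print(config.num_beams, config.max_length)  # 5 200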
create_model.py ADDED
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python3
+ from transformers import SpeechEncoderDecoderModel, AutoFeatureExtractor, AutoTokenizer
+ import torch
+
+
+ encoder_id = "facebook/wav2vec2-large-lv60"
+ decoder_id = "facebook/bart-large"
+
+ model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True)
+ model.config.encoder.feat_proj_dropout = 0.0
+ model.config.encoder.mask_time_prob = 0.0
+ model.config.decoder_start_token_id = model.decoder.config.bos_token_id
+ model.config.pad_token_id = model.decoder.config.pad_token_id
+ model.config.eos_token_id = model.decoder.config.eos_token_id
+ model.config.max_length = 200
+ model.config.num_beams = 5
+ model.config.encoder.layerdrop = 0.0
+ model.config.use_cache = False
+ model.config.processor_class = "Wav2Vec2Processor"
+
+ # check if generation works
+ out = model.generate(torch.ones((1, 2000)))
+
+ model.save_pretrained("./")
+
+ feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
+ feature_extractor.save_pretrained("./")
+ tokenizer = AutoTokenizer.from_pretrained(decoder_id)
+ tokenizer.save_pretrained("./")
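After the script above exports the model, feature extractor, and tokenizer, the checkpoint can be reloaded for a quick generation check on raw 16 kHz audio. A minimal sketch; the random waveform and the "./" path are illustrative only, and the output is not meaningful until the model has been fine-tuned:

from transformers import SpeechEncoderDecoderModel, AutoFeatureExtractor, AutoTokenizer
import torch

model = SpeechEncoderDecoderModel.from_pretrained("./")
feature_extractor = AutoFeatureExtractor.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")

# one second of dummy 16 kHz audio; substitute a real waveform in practice
waveform = torch.randn(16000).numpy()
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))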
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c4389fbc767aa08abec105d529188c6da90b4b0d5c72437b61775fb69f080e7
+ size 2278370941
run_librispeech.sh ADDED
@@ -0,0 +1,31 @@
+ #!/usr/bin/env bash
+ #CUDA_VISIBLE_DEVICES="0" python run_speech_recognition_seq2seq.py \
+ python -m torch.distributed.launch \
+ 	--nproc_per_node 8 run_speech_recognition_seq2seq.py \
+ 	--dataset_name="librispeech_asr" \
+ 	--model_name_or_path="./" \
+ 	--dataset_config_name="clean" \
+ 	--train_split_name="train.100" \
+ 	--eval_split_name="validation" \
+ 	--output_dir="./" \
+ 	--preprocessing_num_workers="16" \
+ 	--length_column_name="input_length" \
+ 	--overwrite_output_dir \
+ 	--num_train_epochs="5" \
+ 	--per_device_train_batch_size="4" \
+ 	--per_device_eval_batch_size="4" \
+ 	--gradient_accumulation_steps="2" \
+ 	--learning_rate="3e-4" \
+ 	--warmup_steps="400" \
+ 	--evaluation_strategy="steps" \
+ 	--text_column_name="text" \
+ 	--save_steps="500" \
+ 	--eval_steps="500" \
+ 	--logging_steps="10" \
+ 	--save_total_limit="1" \
+ 	--freeze_feature_extractor \
+ 	--gradient_checkpointing \
+ 	--fp16 \
+ 	--group_by_length \
+ 	--predict_with_generate \
+ 	--do_eval
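For reference, the launch above runs 8 processes with a per-device batch size of 4 and 2 gradient-accumulation steps, i.e. an effective batch of 8 × 4 × 2 = 64 utterances per optimizer step on LibriSpeech train.100; `run_speech_recognition_seq2seq.py` is assumed here to be the seq2seq speech-recognition example script from the transformers repository.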
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "facebook/bart-large", "tokenizer_class": "BartTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff