gmenon committed
Commit
8d3eb06
1 Parent(s): a006412
config.json CHANGED
@@ -3,7 +3,7 @@
     "SpeechEncoderDecoderModel"
   ],
   "decoder": {
-    "_name_or_path": "facebook/bart-large",
+    "_name_or_path": "facebook/bart-base",
     "activation_dropout": 0.1,
     "activation_function": "gelu",
     "add_bias_logits": false,
@@ -20,20 +20,20 @@
     "classif_dropout": 0.1,
     "classifier_dropout": 0.0,
     "cross_attention_hidden_size": null,
-    "d_model": 1024,
-    "decoder_attention_heads": 16,
-    "decoder_ffn_dim": 4096,
+    "d_model": 768,
+    "decoder_attention_heads": 12,
+    "decoder_ffn_dim": 3072,
     "decoder_layerdrop": 0.0,
-    "decoder_layers": 12,
+    "decoder_layers": 6,
     "decoder_start_token_id": 2,
     "diversity_penalty": 0.0,
     "do_sample": false,
     "dropout": 0.1,
     "early_stopping": true,
-    "encoder_attention_heads": 16,
-    "encoder_ffn_dim": 4096,
+    "encoder_attention_heads": 12,
+    "encoder_ffn_dim": 3072,
     "encoder_layerdrop": 0.0,
-    "encoder_layers": 12,
+    "encoder_layers": 6,
     "encoder_no_repeat_ngram_size": 0,
     "eos_token_id": 2,
     "exponential_decay_length_penalty": null,
@@ -61,9 +61,10 @@
     "model_type": "bart",
     "no_repeat_ngram_size": 3,
     "normalize_before": false,
+    "normalize_embedding": true,
     "num_beam_groups": 1,
     "num_beams": 4,
-    "num_hidden_layers": 12,
+    "num_hidden_layers": 6,
     "num_return_sequences": 1,
     "output_attentions": false,
     "output_hidden_states": false,
@@ -106,7 +107,7 @@
     "tokenizer_class": null,
     "top_k": 50,
     "top_p": 1.0,
-    "torch_dtype": null,
+    "torch_dtype": "float32",
     "torchscript": false,
     "typical_p": 1.0,
     "use_bfloat16": false,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:57923adad3ced91b7979e1fea25b8ab67480b6461bf758f8f69ca3f76d6bc098
-size 2278337685
+oid sha256:ca8fd76aa7ed61545e51c6e8f239069cefec1e6c185d200b69b3e7c71bdb84df
+size 1649512941
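pytorch_model.bin is stored with Git LFS, so the repository only tracks a pointer containing the SHA-256 object id and byte size of the real file; switching the decoder to bart-base shrinks the checkpoint from roughly 2.28 GB to roughly 1.65 GB. A small sketch for verifying a locally pulled checkpoint against the new pointer (the local path is assumed):

# Sketch: check a downloaded pytorch_model.bin against the new LFS pointer.
import hashlib, os

path = "pytorch_model.bin"  # assumed local path after `git lfs pull`
expected_oid = "ca8fd76aa7ed61545e51c6e8f239069cefec1e6c185d200b69b3e7c71bdb84df"
expected_size = 1649512941  # bytes, as recorded in the pointer above

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size does not match LFS pointer"
assert h.hexdigest() == expected_oid, "sha256 does not match LFS pointer"
print("pytorch_model.bin matches the new LFS pointer")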
run_speech_recognition_seq2seq.py DELETED
@@ -1 +0,0 @@
-/mnt/lscratch/users/gmenon/transformers/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py