Titouan committed on
Commit
40b72cc
1 Parent(s): 93d30e3

new converted model

Browse files
Files changed (2) hide show
  1. config.json +14 -7
  2. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "activation_dropout": 0.1,
3
  "apply_spec_augment": true,
4
  "architectures": [
5
  "Wav2Vec2Model"
@@ -36,26 +36,33 @@
36
  ],
37
  "ctc_loss_reduction": "sum",
38
  "ctc_zero_infinity": false,
39
- "do_stable_layer_norm": false,
40
  "eos_token_id": 2,
41
  "feat_extract_activation": "gelu",
42
  "feat_extract_dropout": 0.0,
43
- "feat_extract_norm": "group",
44
  "feat_proj_dropout": 0.1,
45
- "final_dropout": 0.1,
46
  "gradient_checkpointing": false,
47
  "hidden_act": "gelu",
48
  "hidden_dropout": 0.1,
49
- "hidden_dropout_prob": 0.1,
50
  "hidden_size": 1024,
51
  "initializer_range": 0.02,
52
  "intermediate_size": 4096,
53
  "layer_norm_eps": 1e-05,
54
  "layerdrop": 0.1,
 
 
 
 
 
55
  "mask_feature_length": 10,
56
  "mask_feature_prob": 0.0,
57
  "mask_time_length": 10,
58
- "mask_time_prob": 0.05,
 
 
 
59
  "model_type": "wav2vec2",
60
  "num_attention_heads": 16,
61
  "num_conv_pos_embedding_groups": 16,
@@ -63,6 +70,6 @@
63
  "num_feat_extract_layers": 7,
64
  "num_hidden_layers": 24,
65
  "pad_token_id": 0,
66
- "transformers_version": "4.6.0.dev0",
67
  "vocab_size": 32
68
  }
1
  {
2
+ "activation_dropout": 0.0,
3
  "apply_spec_augment": true,
4
  "architectures": [
5
  "Wav2Vec2Model"
36
  ],
37
  "ctc_loss_reduction": "sum",
38
  "ctc_zero_infinity": false,
39
+ "do_stable_layer_norm": true,
40
  "eos_token_id": 2,
41
  "feat_extract_activation": "gelu",
42
  "feat_extract_dropout": 0.0,
43
+ "feat_extract_norm": "layer",
44
  "feat_proj_dropout": 0.1,
45
+ "final_dropout": 0.0,
46
  "gradient_checkpointing": false,
47
  "hidden_act": "gelu",
48
  "hidden_dropout": 0.1,
 
49
  "hidden_size": 1024,
50
  "initializer_range": 0.02,
51
  "intermediate_size": 4096,
52
  "layer_norm_eps": 1e-05,
53
  "layerdrop": 0.1,
54
+ "mask_channel_length": 10,
55
+ "mask_channel_min_space": 1,
56
+ "mask_channel_other": 0.0,
57
+ "mask_channel_prob": 0.0,
58
+ "mask_channel_selection": "static",
59
  "mask_feature_length": 10,
60
  "mask_feature_prob": 0.0,
61
  "mask_time_length": 10,
62
+ "mask_time_min_space": 1,
63
+ "mask_time_other": 0.0,
64
+ "mask_time_prob": 0.075,
65
+ "mask_time_selection": "static",
66
  "model_type": "wav2vec2",
67
  "num_attention_heads": 16,
68
  "num_conv_pos_embedding_groups": 16,
70
  "num_feat_extract_layers": 7,
71
  "num_hidden_layers": 24,
72
  "pad_token_id": 0,
73
+ "transformers_version": "4.5.1",
74
  "vocab_size": 32
75
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8d512d8551cfa20790cafe295a2733964c0190daeb8dc4cf6af1cb3cdca67cdf
3
- size 1261821083
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6aa1459babe699823014893eaa70df2a69cbf98d5e682b178f378df3b2618077
3
+ size 1261920939