patrickvonplaten committed on
Commit
fadfcc5
1 Parent(s): e36652b

correct config and add converted model

Files changed (2)
  1. config.json +36 -5
  2. pytorch_model.bin +3 -0
config.json CHANGED
@@ -1,12 +1,16 @@
 {
   "activation_dropout": 0.0,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
   "apply_spec_augment": true,
   "architectures": [
-    "Wav2Vec2Model"
+    "Wav2Vec2ForPreTraining"
   ],
   "attention_dropout": 0.1,
   "bos_token_id": 1,
-  "codevector_dim": 384,
+  "classifier_proj_size": 256,
+  "codevector_dim": 768,
   "contrastive_logits_temperature": 0.1,
   "conv_bias": true,
   "conv_dim": [
@@ -61,13 +65,16 @@
   "mask_channel_prob": 0.0,
   "mask_channel_selection": "static",
   "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
   "mask_feature_prob": 0.0,
   "mask_time_length": 10,
+  "mask_time_min_masks": 2,
   "mask_time_min_space": 1,
   "mask_time_other": 0.0,
   "mask_time_prob": 0.075,
   "mask_time_selection": "static",
   "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
   "num_attention_heads": 16,
   "num_codevector_groups": 2,
   "num_codevectors_per_group": 320,
@@ -76,9 +83,33 @@
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
   "num_negatives": 100,
+  "output_hidden_size": 1024,
   "pad_token_id": 0,
-  "proj_codevector_dim": 256,
+  "proj_codevector_dim": 768,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
   "torch_dtype": "float32",
-  "transformers_version": "4.9.2",
-  "vocab_size": 32
+  "transformers_version": "4.18.0.dev0",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 32,
+  "xvector_output_dim": 512
 }
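For quick verification of the corrected config, here is a minimal sketch of reading it back with transformers; "./wav2vec2-checkpoint" is a placeholder path for wherever this repository is cloned or downloaded:

```python
# Minimal sketch: inspect the corrected config and load the converted weights.
# "./wav2vec2-checkpoint" is a placeholder for a local copy of this repo.
from transformers import Wav2Vec2Config, Wav2Vec2ForPreTraining

config = Wav2Vec2Config.from_pretrained("./wav2vec2-checkpoint")
print(config.architectures)        # ['Wav2Vec2ForPreTraining']
print(config.codevector_dim)       # 768 (was 384 before this commit)
print(config.proj_codevector_dim)  # 768 (was 256 before this commit)

# The converted pytorch_model.bin loads into the pretraining architecture.
model = Wav2Vec2ForPreTraining.from_pretrained("./wav2vec2-checkpoint")
```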
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15152c284ee5942a27d874ee845afaa88c93d4faed178915fcb62fb002eac61c
+size 1269701863
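pytorch_model.bin is stored via Git LFS, so the three lines above are a pointer file: the oid is the SHA-256 of the actual weights and size is their byte count. A sketch for checking a downloaded copy against this pointer:

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib

EXPECTED_OID = "15152c284ee5942a27d874ee845afaa88c93d4faed178915fcb62fb002eac61c"
EXPECTED_SIZE = 1269701863  # bytes, from the pointer's "size" line

h = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```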