DylanonWic committed on
Commit
9a7c220
1 Parent(s): a04e3ce

Training in progress, step 400

Browse files
Files changed (3) hide show
  1. config.json +17 -18
  2. pytorch_model.bin +2 -2
  3. training_args.bin +2 -2
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "facebook/wav2vec2-base",
3
  "activation_dropout": 0.0,
4
  "adapter_kernel_size": 3,
5
  "adapter_stride": 2,
@@ -11,9 +11,9 @@
11
  "attention_dropout": 0.1,
12
  "bos_token_id": 1,
13
  "classifier_proj_size": 256,
14
- "codevector_dim": 256,
15
  "contrastive_logits_temperature": 0.1,
16
- "conv_bias": false,
17
  "conv_dim": [
18
  512,
19
  512,
@@ -44,21 +44,22 @@
44
  "ctc_loss_reduction": "mean",
45
  "ctc_zero_infinity": false,
46
  "diversity_loss_weight": 0.1,
47
- "do_stable_layer_norm": false,
48
  "eos_token_id": 2,
49
  "feat_extract_activation": "gelu",
50
- "feat_extract_norm": "group",
 
51
  "feat_proj_dropout": 0.1,
52
  "feat_quantizer_dropout": 0.0,
53
  "final_dropout": 0.0,
54
- "freeze_feat_extract_train": true,
55
  "hidden_act": "gelu",
56
  "hidden_dropout": 0.1,
57
- "hidden_size": 768,
58
  "initializer_range": 0.02,
59
- "intermediate_size": 3072,
60
  "layer_norm_eps": 1e-05,
61
- "layerdrop": 0.0,
62
  "mask_channel_length": 10,
63
  "mask_channel_min_space": 1,
64
  "mask_channel_other": 0.0,
@@ -71,23 +72,21 @@
71
  "mask_time_min_masks": 2,
72
  "mask_time_min_space": 1,
73
  "mask_time_other": 0.0,
74
- "mask_time_prob": 0.05,
75
  "mask_time_selection": "static",
76
  "model_type": "wav2vec2",
77
- "no_mask_channel_overlap": false,
78
- "no_mask_time_overlap": false,
79
  "num_adapter_layers": 3,
80
- "num_attention_heads": 12,
81
  "num_codevector_groups": 2,
82
  "num_codevectors_per_group": 320,
83
  "num_conv_pos_embedding_groups": 16,
84
  "num_conv_pos_embeddings": 128,
85
  "num_feat_extract_layers": 7,
86
- "num_hidden_layers": 12,
87
  "num_negatives": 100,
88
- "output_hidden_size": 768,
89
  "pad_token_id": 69,
90
- "proj_codevector_dim": 256,
91
  "tdnn_dilation": [
92
  1,
93
  2,
@@ -110,8 +109,8 @@
110
  1
111
  ],
112
  "torch_dtype": "float32",
113
- "transformers_version": "4.26.0",
114
  "use_weighted_layer_sum": false,
115
- "vocab_size": 128,
116
  "xvector_output_dim": 512
117
  }
 
1
  {
2
+ "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
3
  "activation_dropout": 0.0,
4
  "adapter_kernel_size": 3,
5
  "adapter_stride": 2,
 
11
  "attention_dropout": 0.1,
12
  "bos_token_id": 1,
13
  "classifier_proj_size": 256,
14
+ "codevector_dim": 768,
15
  "contrastive_logits_temperature": 0.1,
16
+ "conv_bias": true,
17
  "conv_dim": [
18
  512,
19
  512,
 
44
  "ctc_loss_reduction": "mean",
45
  "ctc_zero_infinity": false,
46
  "diversity_loss_weight": 0.1,
47
+ "do_stable_layer_norm": true,
48
  "eos_token_id": 2,
49
  "feat_extract_activation": "gelu",
50
+ "feat_extract_dropout": 0.0,
51
+ "feat_extract_norm": "layer",
52
  "feat_proj_dropout": 0.1,
53
  "feat_quantizer_dropout": 0.0,
54
  "final_dropout": 0.0,
55
+ "gradient_checkpointing": false,
56
  "hidden_act": "gelu",
57
  "hidden_dropout": 0.1,
58
+ "hidden_size": 1024,
59
  "initializer_range": 0.02,
60
+ "intermediate_size": 4096,
61
  "layer_norm_eps": 1e-05,
62
+ "layerdrop": 0.1,
63
  "mask_channel_length": 10,
64
  "mask_channel_min_space": 1,
65
  "mask_channel_other": 0.0,
 
72
  "mask_time_min_masks": 2,
73
  "mask_time_min_space": 1,
74
  "mask_time_other": 0.0,
75
+ "mask_time_prob": 0.075,
76
  "mask_time_selection": "static",
77
  "model_type": "wav2vec2",
 
 
78
  "num_adapter_layers": 3,
79
+ "num_attention_heads": 16,
80
  "num_codevector_groups": 2,
81
  "num_codevectors_per_group": 320,
82
  "num_conv_pos_embedding_groups": 16,
83
  "num_conv_pos_embeddings": 128,
84
  "num_feat_extract_layers": 7,
85
+ "num_hidden_layers": 24,
86
  "num_negatives": 100,
87
+ "output_hidden_size": 1024,
88
  "pad_token_id": 69,
89
+ "proj_codevector_dim": 768,
90
  "tdnn_dilation": [
91
  1,
92
  2,
 
109
  1
110
  ],
111
  "torch_dtype": "float32",
112
+ "transformers_version": "4.26.1",
113
  "use_weighted_layer_sum": false,
114
+ "vocab_size": 70,
115
  "xvector_output_dim": 512
116
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a5c130d887c022ccc5b21aca7ea96bbd8c2a2467ed9770bdd62985caddad0a7c
3
- size 377954081
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ebfddb0530761bfcbf61046f8cbcc619fbed78e873799bf86a22e740c132273
3
+ size 1262188845
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9ef3ae782dfc7b4eede3b53656d955faf2ed471711d1e8141acbc7ac28f6de28
3
- size 3515
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce2c0c66a21281345382990dca0beda63eb3d9666127009f53b51a84eaeef0a7
3
+ size 3579