EzraWilliam committed
Commit 8bbd55a · verified · 1 Parent(s): 8e7e3c6

End of training

README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  license: apache-2.0
+ base_model: facebook/wav2vec2-xls-r-300m
  tags:
  - generated_from_trainer
- base_model: EzraWilliam/wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1
  model-index:
  - name: wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1
    results: []
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1
 
- This model is a fine-tuned version of [EzraWilliam/wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1](https://huggingface.co/EzraWilliam/wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1) on the None dataset.
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the None dataset.
 
  ## Model description
 
@@ -32,13 +32,15 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 0.0001
- - train_batch_size: 32
+ - learning_rate: 0.0003
+ - train_batch_size: 16
  - eval_batch_size: 8
  - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 32
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
- - lr_scheduler_warmup_steps: 1000
+ - lr_scheduler_warmup_steps: 500
  - num_epochs: 30
  - mixed_precision_training: Native AMP
 
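For orientation, the updated hyperparameters above correspond to a standard 🤗 Transformers Trainer setup. A minimal sketch of how they map onto TrainingArguments, assuming the usual Trainer-based CTC fine-tuning loop; output_dir and anything not listed in the README are illustrative placeholders, not taken from this commit:

```python
from transformers import TrainingArguments

# Sketch only: values mirror the README above; everything else is illustrative.
training_args = TrainingArguments(
    output_dir="wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1",
    learning_rate=3e-4,              # was 1e-4
    per_device_train_batch_size=16,  # was 32
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,   # keeps the effective train batch size at 32
    num_train_epochs=30,
    lr_scheduler_type="linear",
    warmup_steps=500,                # was 1000
    seed=42,
    fp16=True,                       # "Native AMP" mixed precision
)
```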
 
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "EzraWilliam/wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1",
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
  "activation_dropout": 0.0,
  "adapter_attn_dim": null,
  "adapter_kernel_size": 3,
@@ -9,12 +9,12 @@
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
- "attention_dropout": 0.1,
+ "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
- "codevector_dim": 256,
+ "codevector_dim": 768,
  "contrastive_logits_temperature": 0.1,
- "conv_bias": false,
+ "conv_bias": true,
  "conv_dim": [
    512,
    512,
@@ -45,50 +45,41 @@
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "diversity_loss_weight": 0.1,
- "do_stable_layer_norm": false,
+ "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
- "feat_extract_norm": "group",
- "feat_proj_dropout": 0.1,
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
  "feat_quantizer_dropout": 0.0,
  "final_dropout": 0.0,
- "freeze_feat_extract_train": true,
+ "gradient_checkpointing": false,
  "hidden_act": "gelu",
- "hidden_dropout": 0.1,
- "hidden_size": 768,
+ "hidden_dropout": 0.0,
+ "hidden_size": 1024,
  "initializer_range": 0.02,
- "intermediate_size": 3072,
+ "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.0,
- "mask_channel_length": 10,
- "mask_channel_min_space": 1,
- "mask_channel_other": 0.0,
- "mask_channel_prob": 0.0,
- "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
- "mask_time_min_space": 1,
- "mask_time_other": 0.0,
  "mask_time_prob": 0.05,
- "mask_time_selection": "static",
  "model_type": "wav2vec2",
- "no_mask_channel_overlap": false,
- "no_mask_time_overlap": false,
  "num_adapter_layers": 3,
- "num_attention_heads": 12,
+ "num_attention_heads": 16,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
- "num_hidden_layers": 12,
+ "num_hidden_layers": 24,
  "num_negatives": 100,
- "output_hidden_size": 768,
+ "output_hidden_size": 1024,
  "pad_token_id": 30,
- "proj_codevector_dim": 256,
+ "proj_codevector_dim": 768,
  "tdnn_dilation": [
    1,
    2,
@@ -113,6 +104,6 @@
  "torch_dtype": "float32",
  "transformers_version": "4.37.0",
  "use_weighted_layer_sum": false,
- "vocab_size": 32,
+ "vocab_size": 33,
  "xvector_output_dim": 512
  }
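The config diff is essentially the switch from a wav2vec2-base-sized encoder (12 layers, hidden size 768) to the wav2vec2-xls-r-300m architecture (24 layers, hidden size 1024, 16 attention heads), plus a 33-token CTC vocabulary. A quick sanity check of a local clone against these values, sketched with the standard transformers AutoConfig API:

```python
from transformers import AutoConfig

repo = "EzraWilliam/wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1"
config = AutoConfig.from_pretrained(repo)

# Values expected after this commit (xls-r-300m backbone, 33-token CTC vocab).
assert config.model_type == "wav2vec2"
assert config.num_hidden_layers == 24
assert config.hidden_size == 1024
assert config.num_attention_heads == 16
assert config.intermediate_size == 4096
assert config.vocab_size == 33
```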
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:400f92add00ff328beff889a4ad7330191da12716a516b058fa9ff4f706472f5
- size 377611120
+ oid sha256:807a8131841a0f1c7319655b6313cf70d5ebe9be2c5f5a6066e10807f6ee2010
+ size 1261942780
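As a rough consistency check (ignoring the small safetensors header overhead), dividing the float32 payload sizes by 4 bytes per parameter gives parameter counts that match the backbone swap:

```python
# Back-of-the-envelope parameter counts from the float32 payload sizes.
old_params = 377_611_120 / 4    # ≈ 94.4 M, a wav2vec2-base-sized model
new_params = 1_261_942_780 / 4  # ≈ 315.5 M, a wav2vec2-xls-r-300m-sized model
print(f"{old_params / 1e6:.1f}M -> {new_params / 1e6:.1f}M parameters")
```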
runs/Jan22_13-01-32_d5d0e8fdc236/events.out.tfevents.1705928503.d5d0e8fdc236.728.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9034456d3a1b77c927c95b8675320aa2c7f20473abb0bc030b0a2c4ba10fd2c7
+ size 6314
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:31680f817fef871c446668b83e3fb048a9980bc5b4ff15b9952f6fe26937f1b0
+ oid sha256:bdf89d1962960b3af1d6ec2106ace5a78ace764bffcff5a8cdb298813ef1f5bc
  size 4792
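Once this commit is pushed, the checkpoint loads like any other Wav2Vec2 CTC model. A minimal inference sketch, assuming a matching Wav2Vec2Processor (tokenizer plus feature extractor) is available in the same repo and the input is 16 kHz mono audio; the zero waveform below is only a placeholder:

```python
import numpy as np
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

repo = "EzraWilliam/wav2vec2-base-fleurs-CommonVoice-demo-google-colab-Ezra_William_Prod1"
processor = Wav2Vec2Processor.from_pretrained(repo)
model = Wav2Vec2ForCTC.from_pretrained(repo)
model.eval()

# Placeholder input: one second of silence at 16 kHz stands in for real speech.
waveform = np.zeros(16_000, dtype=np.float32)
inputs = processor(waveform, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding of the most likely token ids.
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))
```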