marinone94 committed
Commit
9415f2a
1 Parent(s): a13c088

Training in progress, step 5

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .ipynb_checkpoints/README-checkpoint.md +0 -68
  2. .ipynb_checkpoints/added_tokens-checkpoint.json +0 -1
  3. .ipynb_checkpoints/all_results-checkpoint.json +0 -8
  4. .ipynb_checkpoints/config-checkpoint.json +0 -107
  5. .ipynb_checkpoints/eval_results-checkpoint.json +0 -9
  6. .ipynb_checkpoints/preprocessor_config-checkpoint.json +0 -9
  7. .ipynb_checkpoints/run-dummy-ab-gpu-checkpoint.sh +22 -0
  8. .ipynb_checkpoints/run-dummy-sv-gpu-checkpoint.sh +2 -2
  9. .ipynb_checkpoints/special_tokens_map-checkpoint.json +0 -1
  10. .ipynb_checkpoints/tokenizer_config-checkpoint.json +0 -1
  11. .ipynb_checkpoints/train_results-checkpoint.json +0 -8
  12. .ipynb_checkpoints/trainer_state-checkpoint.json +0 -103
  13. .ipynb_checkpoints/vocab-checkpoint.json +0 -1
  14. README.md +0 -67
  15. added_tokens.json +1 -1
  16. all_results.json +0 -14
  17. checkpoint-400/config.json +0 -107
  18. checkpoint-400/optimizer.pt +0 -3
  19. checkpoint-400/pytorch_model.bin +0 -3
  20. checkpoint-400/trainer_state.json +0 -328
  21. checkpoint-450/config.json +0 -107
  22. checkpoint-450/optimizer.pt +0 -3
  23. checkpoint-450/preprocessor_config.json +0 -9
  24. checkpoint-450/pytorch_model.bin +0 -3
  25. checkpoint-450/rng_state.pth +0 -3
  26. checkpoint-450/scaler.pt +0 -3
  27. checkpoint-450/scheduler.pt +0 -3
  28. checkpoint-450/trainer_state.json +0 -367
  29. {checkpoint-500 → checkpoint-5}/config.json +29 -40
  30. checkpoint-400/training_args.bin → checkpoint-5/optimizer.pt +2 -2
  31. {checkpoint-400 → checkpoint-5}/preprocessor_config.json +2 -2
  32. checkpoint-400/scaler.pt → checkpoint-5/pytorch_model.bin +2 -2
  33. {checkpoint-400 → checkpoint-5}/rng_state.pth +1 -1
  34. {checkpoint-500 → checkpoint-5}/scaler.pt +1 -1
  35. {checkpoint-400 → checkpoint-5}/scheduler.pt +1 -1
  36. checkpoint-5/trainer_state.json +15 -0
  37. {checkpoint-450 → checkpoint-5}/training_args.bin +1 -1
  38. checkpoint-500/optimizer.pt +0 -3
  39. checkpoint-500/preprocessor_config.json +0 -9
  40. checkpoint-500/pytorch_model.bin +0 -3
  41. checkpoint-500/rng_state.pth +0 -3
  42. checkpoint-500/scheduler.pt +0 -3
  43. checkpoint-500/trainer_state.json +0 -55
  44. checkpoint-500/training_args.bin +0 -3
  45. config.json +29 -40
  46. eval_results.json +0 -9
  47. preprocessor_config.json +2 -2
  48. pytorch_model.bin +2 -2
  49. run-dummy-ab-gpu.sh +22 -0
  50. run-dummy-sv-gpu.sh +2 -2
.ipynb_checkpoints/README-checkpoint.md DELETED
@@ -1,68 +0,0 @@
- ---
- language:
- - sv-SE
- license: apache-2.0
- tags:
- - automatic-speech-recognition
- - mozilla-foundation/common_voice_7_0
- - generated_from_trainer
- datasets:
- - common_voice
- model-index:
- - name: ''
- results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- #
-
- This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - SV-SE dataset.
- It achieves the following results on the evaluation set:
- - Loss: 14.2136
- - Wer: 1.0
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 0.0075
- - train_batch_size: 8
- - eval_batch_size: 8
- - seed: 42
- - gradient_accumulation_steps: 4
- - total_train_batch_size: 32
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - lr_scheduler_warmup_steps: 2000
- - training_steps: 10
- - mixed_precision_training: Native AMP
-
- ### Training results
-
- | Training Loss | Epoch | Step | Validation Loss | Wer |
- |:-------------:|:-----:|:----:|:---------------:|:---:|
- | 10.3458 | 0.01 | 5 | 15.0991 | 1.0 |
- | 11.9029 | 0.03 | 10 | 14.2136 | 1.0 |
-
-
- ### Framework versions
-
- - Transformers 4.16.0.dev0
- - Pytorch 1.10.1+cu102
- - Datasets 1.17.1.dev0
- - Tokenizers 0.11.0
.ipynb_checkpoints/added_tokens-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"<s>": 35, "</s>": 36}
.ipynb_checkpoints/all_results-checkpoint.json DELETED
@@ -1,8 +0,0 @@
- {
- "epoch": 0.03,
- "train_loss": 12.162414741516113,
- "train_runtime": 400.3916,
- "train_samples": 11030,
- "train_samples_per_second": 0.799,
- "train_steps_per_second": 0.025
- }
.ipynb_checkpoints/config-checkpoint.json DELETED
@@ -1,107 +0,0 @@
- {
- "_name_or_path": "facebook/wav2vec2-xls-r-300m",
- "activation_dropout": 0.1,
- "adapter_kernel_size": 3,
- "adapter_stride": 2,
- "add_adapter": false,
- "apply_spec_augment": true,
- "architectures": ["Wav2Vec2ForCTC"],
- "attention_dropout": 0.0,
- "bos_token_id": 1,
- "classifier_proj_size": 256,
- "codevector_dim": 768,
- "contrastive_logits_temperature": 0.1,
- "conv_bias": true,
- "conv_dim": [512, 512, 512, 512, 512, 512, 512],
- "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
- "conv_stride": [5, 2, 2, 2, 2, 2, 2],
- "ctc_loss_reduction": "mean",
- "ctc_zero_infinity": false,
- "diversity_loss_weight": 0.1,
- "do_stable_layer_norm": true,
- "eos_token_id": 2,
- "feat_extract_activation": "gelu",
- "feat_extract_dropout": 0.0,
- "feat_extract_norm": "layer",
- "feat_proj_dropout": 0.0,
- "feat_quantizer_dropout": 0.0,
- "final_dropout": 0.0,
- "hidden_act": "gelu",
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "intermediate_size": 4096,
- "layer_norm_eps": 1e-05,
- "layerdrop": 0.0,
- "mask_feature_length": 64,
- "mask_feature_min_masks": 0,
- "mask_feature_prob": 0.25,
- "mask_time_length": 10,
- "mask_time_min_masks": 2,
- "mask_time_prob": 0.75,
- "model_type": "wav2vec2",
- "num_adapter_layers": 3,
- "num_attention_heads": 16,
- "num_codevector_groups": 2,
- "num_codevectors_per_group": 320,
- "num_conv_pos_embedding_groups": 16,
- "num_conv_pos_embeddings": 128,
- "num_feat_extract_layers": 7,
- "num_hidden_layers": 24,
- "num_negatives": 100,
- "output_hidden_size": 1024,
- "pad_token_id": 34,
- "proj_codevector_dim": 768,
- "tdnn_dilation": [1, 2, 3, 1, 1],
- "tdnn_dim": [512, 512, 512, 512, 1500],
- "tdnn_kernel": [5, 3, 3, 1, 1],
- "torch_dtype": "float32",
- "transformers_version": "4.16.0.dev0",
- "use_weighted_layer_sum": false,
- "vocab_size": 37,
- "xvector_output_dim": 512
- }
.ipynb_checkpoints/eval_results-checkpoint.json DELETED
@@ -1,9 +0,0 @@
- {
- "epoch": 1.0,
- "eval_loss": 3.82973051071167,
- "eval_runtime": 132.808,
- "eval_samples": 4620,
- "eval_samples_per_second": 34.787,
- "eval_steps_per_second": 4.352,
- "eval_wer": 1.0
- }
.ipynb_checkpoints/preprocessor_config-checkpoint.json DELETED
@@ -1,9 +0,0 @@
- {
- "do_normalize": true,
- "feature_extractor_type": "Wav2Vec2FeatureExtractor",
- "feature_size": 1,
- "padding_side": "right",
- "padding_value": 0,
- "return_attention_mask": true,
- "sampling_rate": 16000
- }
.ipynb_checkpoints/run-dummy-ab-gpu-checkpoint.sh ADDED
@@ -0,0 +1,22 @@
+ python run_speech_recognition_ctc.py \
+ --dataset_name="mozilla-foundation/common_voice_7_0" \
+ --model_name_or_path="hf-test/xls-r-dummy" \
+ --dataset_config_name="ab" \
+ --output_dir="./" \
+ --overwrite_output_dir \
+ --max_steps="10" \
+ --per_device_train_batch_size="2" \
+ --learning_rate="3e-4" \
+ --save_total_limit="1" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --length_column_name="input_length" \
+ --save_steps="5" \
+ --layerdrop="0.0" \
+ --freeze_feature_encoder \
+ --gradient_checkpointing \
+ --fp16 \
+ --group_by_length \
+ --push_to_hub \
+ --use_auth_token \
+ --do_train --do_eval
.ipynb_checkpoints/run-dummy-sv-gpu-checkpoint.sh CHANGED
@@ -1,7 +1,7 @@
  python run_speech_recognition_ctc.py \
  --dataset_name="mozilla-foundation/common_voice_7_0" \
- --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
- --dataset_config_name="sv-SE" \
+ --model_name_or_path="hf-test/xls-r-dummy" \
+ --dataset_config_name="ab" \
  --output_dir="./" \
  --overwrite_output_dir \
  --max_steps="10" \
.ipynb_checkpoints/special_tokens_map-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
.ipynb_checkpoints/tokenizer_config-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
.ipynb_checkpoints/train_results-checkpoint.json DELETED
@@ -1,8 +0,0 @@
- {
- "epoch": 0.03,
- "train_loss": 12.162414741516113,
- "train_runtime": 400.3916,
- "train_samples": 11030,
- "train_samples_per_second": 0.799,
- "train_steps_per_second": 0.025
- }
.ipynb_checkpoints/trainer_state-checkpoint.json DELETED
@@ -1,103 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 0.029006526468455404,
- "global_step": 10,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {"epoch": 0.0, "learning_rate": 3.75e-06, "loss": 13.605, "step": 1},
- {"epoch": 0.01, "learning_rate": 7.5e-06, "loss": 11.0063, "step": 2},
- {"epoch": 0.01, "learning_rate": 1.1249999999999999e-05, "loss": 11.6693, "step": 3},
- {"epoch": 0.01, "learning_rate": 1.5e-05, "loss": 13.432, "step": 4},
- {"epoch": 0.01, "learning_rate": 1.875e-05, "loss": 10.3458, "step": 5},
- {"epoch": 0.01, "eval_loss": 15.09913444519043, "eval_runtime": 139.8551, "eval_samples_per_second": 33.034, "eval_steps_per_second": 4.133, "eval_wer": 1.0, "step": 5},
- {"epoch": 0.02, "learning_rate": 2.2499999999999998e-05, "loss": 15.2451, "step": 6},
- {"epoch": 0.02, "learning_rate": 2.625e-05, "loss": 10.0481, "step": 7},
- {"epoch": 0.02, "learning_rate": 3e-05, "loss": 12.3838, "step": 8},
- {"epoch": 0.03, "learning_rate": 3.3749999999999994e-05, "loss": 11.9858, "step": 9},
- {"epoch": 0.03, "learning_rate": 3.75e-05, "loss": 11.9029, "step": 10},
- {"epoch": 0.03, "eval_loss": 14.213573455810547, "eval_runtime": 160.2552, "eval_samples_per_second": 28.829, "eval_steps_per_second": 3.607, "eval_wer": 1.0, "step": 10},
- {"epoch": 0.03, "step": 10, "total_flos": 4.405923604988928e+16, "train_loss": 12.162414741516113, "train_runtime": 400.3916, "train_samples_per_second": 0.799, "train_steps_per_second": 0.025}
- ],
- "max_steps": 10,
- "num_train_epochs": 1,
- "total_flos": 4.405923604988928e+16,
- "trial_name": null,
- "trial_params": null
- }
.ipynb_checkpoints/vocab-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "ä": 27, "å": 28, "é": 29, "ô": 30, "ö": 31, "ü": 32, "|": 0, "[UNK]": 33, "[PAD]": 34}
README.md DELETED
@@ -1,67 +0,0 @@
- ---
- language:
- - sv-SE
- license: apache-2.0
- tags:
- - automatic-speech-recognition
- - mozilla-foundation/common_voice_7_0
- - generated_from_trainer
- datasets:
- - common_voice
- model-index:
- - name: ''
- results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- #
-
- This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - SV-SE dataset.
- It achieves the following results on the evaluation set:
- - Loss: 2.9141
- - Wer: 1.0
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 7.5e-05
- - train_batch_size: 8
- - eval_batch_size: 8
- - seed: 42
- - gradient_accumulation_steps: 4
- - total_train_batch_size: 32
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - lr_scheduler_warmup_steps: 20
- - num_epochs: 2.0
- - mixed_precision_training: Native AMP
-
- ### Training results
-
- | Training Loss | Epoch | Step | Validation Loss | Wer |
- |:-------------:|:-----:|:----:|:---------------:|:---:|
- | 2.9357 | 1.45 | 500 | 2.9459 | 1.0 |
-
-
- ### Framework versions
-
- - Transformers 4.16.0.dev0
- - Pytorch 1.10.1+cu102
- - Datasets 1.17.1.dev0
- - Tokenizers 0.11.0
added_tokens.json CHANGED
@@ -1 +1 @@
- {"<s>": 35, "</s>": 36}
+ {"<s>": 51, "</s>": 52}
all_results.json DELETED
@@ -1,14 +0,0 @@
- {
- "epoch": 2.0,
- "eval_loss": 2.91414213180542,
- "eval_runtime": 133.9783,
- "eval_samples": 4620,
- "eval_samples_per_second": 34.483,
- "eval_steps_per_second": 4.314,
- "eval_wer": 1.0,
- "train_loss": 3.289040254992108,
- "train_runtime": 1292.4856,
- "train_samples": 11030,
- "train_samples_per_second": 17.068,
- "train_steps_per_second": 0.532
- }
checkpoint-400/config.json DELETED
@@ -1,107 +0,0 @@
- {
- "_name_or_path": "facebook/wav2vec2-xls-r-300m",
- "activation_dropout": 0.1,
- "adapter_kernel_size": 3,
- "adapter_stride": 2,
- "add_adapter": false,
- "apply_spec_augment": true,
- "architectures": ["Wav2Vec2ForCTC"],
- "attention_dropout": 0.0,
- "bos_token_id": 1,
- "classifier_proj_size": 256,
- "codevector_dim": 768,
- "contrastive_logits_temperature": 0.1,
- "conv_bias": true,
- "conv_dim": [512, 512, 512, 512, 512, 512, 512],
- "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
- "conv_stride": [5, 2, 2, 2, 2, 2, 2],
- "ctc_loss_reduction": "mean",
- "ctc_zero_infinity": false,
- "diversity_loss_weight": 0.1,
- "do_stable_layer_norm": true,
- "eos_token_id": 2,
- "feat_extract_activation": "gelu",
- "feat_extract_dropout": 0.0,
- "feat_extract_norm": "layer",
- "feat_proj_dropout": 0.0,
- "feat_quantizer_dropout": 0.0,
- "final_dropout": 0.0,
- "hidden_act": "gelu",
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "intermediate_size": 4096,
- "layer_norm_eps": 1e-05,
- "layerdrop": 0.0,
- "mask_feature_length": 64,
- "mask_feature_min_masks": 0,
- "mask_feature_prob": 0.25,
- "mask_time_length": 10,
- "mask_time_min_masks": 2,
- "mask_time_prob": 0.75,
- "model_type": "wav2vec2",
- "num_adapter_layers": 3,
- "num_attention_heads": 16,
- "num_codevector_groups": 2,
- "num_codevectors_per_group": 320,
- "num_conv_pos_embedding_groups": 16,
- "num_conv_pos_embeddings": 128,
- "num_feat_extract_layers": 7,
- "num_hidden_layers": 24,
- "num_negatives": 100,
- "output_hidden_size": 1024,
- "pad_token_id": 34,
- "proj_codevector_dim": 768,
- "tdnn_dilation": [1, 2, 3, 1, 1],
- "tdnn_dim": [512, 512, 512, 512, 1500],
- "tdnn_kernel": [5, 3, 3, 1, 1],
- "torch_dtype": "float32",
- "transformers_version": "4.16.0.dev0",
- "use_weighted_layer_sum": false,
- "vocab_size": 37,
- "xvector_output_dim": 512
- }
checkpoint-400/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:16fdeeb953ce80d82d1ba249d99ae68b45e80c943e36a80b8a6517c275a7b594
- size 2490362385
checkpoint-400/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:65431ecd0a10e000abb091aa8e3064f6e344c3e7771a07e792b6c0470ee8092c
- size 1262075377
checkpoint-400/trainer_state.json DELETED
@@ -1,328 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 1.1624365482233503,
- "global_step": 400,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {"epoch": 0.03, "learning_rate": 3.75e-05, "loss": 12.1562, "step": 10},
- {"epoch": 0.06, "learning_rate": 7.125e-05, "loss": 8.7679, "step": 20},
- {"epoch": 0.09, "learning_rate": 7.398952095808383e-05, "loss": 5.3683, "step": 30},
- {"epoch": 0.12, "learning_rate": 7.286676646706586e-05, "loss": 4.3219, "step": 40},
- {"epoch": 0.15, "learning_rate": 7.17440119760479e-05, "loss": 3.7182, "step": 50},
- {"epoch": 0.15, "eval_loss": 3.836604595184326, "eval_runtime": 133.4846, "eval_samples_per_second": 34.611, "eval_steps_per_second": 4.33, "eval_wer": 1.0, "step": 50},
- {"epoch": 0.17, "learning_rate": 7.062125748502993e-05, "loss": 3.478, "step": 60},
- {"epoch": 0.2, "learning_rate": 6.949850299401197e-05, "loss": 3.4492, "step": 70},
- {"epoch": 0.23, "learning_rate": 6.837574850299401e-05, "loss": 3.3928, "step": 80},
- {"epoch": 0.26, "learning_rate": 6.725299401197604e-05, "loss": 3.3183, "step": 90},
- {"epoch": 0.29, "learning_rate": 6.613023952095809e-05, "loss": 3.2075, "step": 100},
- {"epoch": 0.29, "eval_loss": 3.258362293243408, "eval_runtime": 126.6078, "eval_samples_per_second": 36.491, "eval_steps_per_second": 4.565, "eval_wer": 1.0, "step": 100},
- {"epoch": 0.32, "learning_rate": 6.500748502994012e-05, "loss": 3.14, "step": 110},
- {"epoch": 0.35, "learning_rate": 6.388473053892215e-05, "loss": 3.1281, "step": 120},
- {"epoch": 0.38, "learning_rate": 6.276197604790418e-05, "loss": 3.0987, "step": 130},
- {"epoch": 0.41, "learning_rate": 6.163922155688622e-05, "loss": 3.1003, "step": 140},
- {"epoch": 0.44, "learning_rate": 6.0516467065868256e-05, "loss": 3.0922, "step": 150},
- {"epoch": 0.44, "eval_loss": 3.127869129180908, "eval_runtime": 126.3837, "eval_samples_per_second": 36.555, "eval_steps_per_second": 4.573, "eval_wer": 1.0, "step": 150},
- {"epoch": 0.46, "learning_rate": 5.9393712574850293e-05, "loss": 3.0588, "step": 160},
- {"epoch": 0.49, "learning_rate": 5.827095808383233e-05, "loss": 3.0477, "step": 170},
- {"epoch": 0.52, "learning_rate": 5.714820359281436e-05, "loss": 3.045, "step": 180},
- {"epoch": 0.55, "learning_rate": 5.602544910179641e-05, "loss": 3.0439, "step": 190},
- {"epoch": 0.58, "learning_rate": 5.490269461077844e-05, "loss": 3.0846, "step": 200},
- {"epoch": 0.58, "eval_loss": 3.079519271850586, "eval_runtime": 125.7215, "eval_samples_per_second": 36.748, "eval_steps_per_second": 4.597, "eval_wer": 1.0, "step": 200},
- {"epoch": 0.61, "learning_rate": 5.3779940119760477e-05, "loss": 3.0512, "step": 210},
- {"epoch": 0.64, "learning_rate": 5.265718562874251e-05, "loss": 3.0143, "step": 220},
- {"epoch": 0.67, "learning_rate": 5.1534431137724546e-05, "loss": 3.0387, "step": 230},
- {"epoch": 0.7, "learning_rate": 5.0411676646706584e-05, "loss": 3.0311, "step": 240},
- {"epoch": 0.73, "learning_rate": 4.9288922155688615e-05, "loss": 3.0417, "step": 250},
- {"epoch": 0.73, "eval_loss": 3.069390058517456, "eval_runtime": 125.7339, "eval_samples_per_second": 36.744, "eval_steps_per_second": 4.597, "eval_wer": 1.0, "step": 250},
- {"epoch": 0.75, "learning_rate": 4.816616766467066e-05, "loss": 3.0219, "step": 260},
- {"epoch": 0.78, "learning_rate": 4.704341317365269e-05, "loss": 3.0194, "step": 270},
- {"epoch": 0.81, "learning_rate": 4.592065868263473e-05, "loss": 2.9974, "step": 280},
- {"epoch": 0.84, "learning_rate": 4.479790419161676e-05, "loss": 2.9996, "step": 290},
- {"epoch": 0.87, "learning_rate": 4.36751497005988e-05, "loss": 3.0016, "step": 300},
- {"epoch": 0.87, "eval_loss": 3.0347490310668945, "eval_runtime": 132.3039, "eval_samples_per_second": 34.92, "eval_steps_per_second": 4.369, "eval_wer": 1.0, "step": 300},
- {"epoch": 0.9, "learning_rate": 4.255239520958083e-05, "loss": 3.0052, "step": 310},
- {"epoch": 0.93, "learning_rate": 4.142964071856287e-05, "loss": 2.9826, "step": 320},
- {"epoch": 0.96, "learning_rate": 4.030688622754491e-05, "loss": 2.9747, "step": 330},
- {"epoch": 0.99, "learning_rate": 3.918413173652694e-05, "loss": 2.9617, "step": 340},
- {"epoch": 1.02, "learning_rate": 3.806137724550898e-05, "loss": 3.2053, "step": 350},
- {"epoch": 1.02, "eval_loss": 2.984886407852173, "eval_runtime": 149.1508, "eval_samples_per_second": 30.975, "eval_steps_per_second": 3.875, "eval_wer": 1.0, "step": 350},
- {"epoch": 1.05, "learning_rate": 3.693862275449102e-05, "loss": 2.9665, "step": 360},
- {"epoch": 1.08, "learning_rate": 3.581586826347305e-05, "loss": 2.9641, "step": 370},
- {"epoch": 1.1, "learning_rate": 3.469311377245509e-05, "loss": 2.9484, "step": 380},
- {"epoch": 1.13, "learning_rate": 3.3570359281437126e-05, "loss": 2.9494, "step": 390},
- {"epoch": 1.16, "learning_rate": 3.244760479041916e-05, "loss": 2.9698, "step": 400},
- {"epoch": 1.16, "eval_loss": 2.989494562149048, "eval_runtime": 150.6903, "eval_samples_per_second": 30.659, "eval_steps_per_second": 3.836, "eval_wer": 1.0, "step": 400}
- ],
- "max_steps": 688,
- "num_train_epochs": 2,
- "total_flos": 1.1900340345446784e+18,
- "trial_name": null,
- "trial_params": null
- }
checkpoint-450/config.json DELETED
@@ -1,107 +0,0 @@
- {
- "_name_or_path": "facebook/wav2vec2-xls-r-300m",
- "activation_dropout": 0.1,
- "adapter_kernel_size": 3,
- "adapter_stride": 2,
- "add_adapter": false,
- "apply_spec_augment": true,
- "architectures": ["Wav2Vec2ForCTC"],
- "attention_dropout": 0.0,
- "bos_token_id": 1,
- "classifier_proj_size": 256,
- "codevector_dim": 768,
- "contrastive_logits_temperature": 0.1,
- "conv_bias": true,
- "conv_dim": [512, 512, 512, 512, 512, 512, 512],
- "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
- "conv_stride": [5, 2, 2, 2, 2, 2, 2],
- "ctc_loss_reduction": "mean",
- "ctc_zero_infinity": false,
- "diversity_loss_weight": 0.1,
- "do_stable_layer_norm": true,
- "eos_token_id": 2,
- "feat_extract_activation": "gelu",
- "feat_extract_dropout": 0.0,
- "feat_extract_norm": "layer",
- "feat_proj_dropout": 0.0,
- "feat_quantizer_dropout": 0.0,
- "final_dropout": 0.0,
- "hidden_act": "gelu",
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "intermediate_size": 4096,
- "layer_norm_eps": 1e-05,
- "layerdrop": 0.0,
- "mask_feature_length": 64,
- "mask_feature_min_masks": 0,
- "mask_feature_prob": 0.25,
- "mask_time_length": 10,
- "mask_time_min_masks": 2,
- "mask_time_prob": 0.75,
- "model_type": "wav2vec2",
- "num_adapter_layers": 3,
- "num_attention_heads": 16,
- "num_codevector_groups": 2,
- "num_codevectors_per_group": 320,
- "num_conv_pos_embedding_groups": 16,
- "num_conv_pos_embeddings": 128,
- "num_feat_extract_layers": 7,
- "num_hidden_layers": 24,
- "num_negatives": 100,
- "output_hidden_size": 1024,
- "pad_token_id": 34,
- "proj_codevector_dim": 768,
- "tdnn_dilation": [1, 2, 3, 1, 1],
- "tdnn_dim": [512, 512, 512, 512, 1500],
- "tdnn_kernel": [5, 3, 3, 1, 1],
- "torch_dtype": "float32",
- "transformers_version": "4.16.0.dev0",
- "use_weighted_layer_sum": false,
- "vocab_size": 37,
- "xvector_output_dim": 512
- }
checkpoint-450/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8d20a2e7a06e4c6fe1ca1763518fe26e3bd509e54e5c6ce336b1cdd4ad352fc8
- size 2490362385
checkpoint-450/preprocessor_config.json DELETED
@@ -1,9 +0,0 @@
- {
- "do_normalize": true,
- "feature_extractor_type": "Wav2Vec2FeatureExtractor",
- "feature_size": 1,
- "padding_side": "right",
- "padding_value": 0,
- "return_attention_mask": true,
- "sampling_rate": 16000
- }
checkpoint-450/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:64a67aef87c3664c6c90fef5a16264df1a1e9ec09017448f7dd3962f7e7ed3cd
- size 1262075377
checkpoint-450/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:171f5d4a1db63b5ecb87951d275cbcef882e72c7875f0439f386ec2a0cc474fc
- size 14503
checkpoint-450/scaler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5b437a2168639a7226a6616cf299b21e2671732a153df5f941f17a2c38a20459
- size 559
checkpoint-450/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:89db00b13c6183184d8d5a3b77b2b0c26843c6c480825e0951a03deff712e541
- size 623
checkpoint-450/trainer_state.json DELETED
@@ -1,367 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 1.3074691805656273,
- "global_step": 450,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {"epoch": 0.03, "learning_rate": 3.75e-05, "loss": 12.1562, "step": 10},
- {"epoch": 0.06, "learning_rate": 7.125e-05, "loss": 8.7679, "step": 20},
- {"epoch": 0.09, "learning_rate": 7.398952095808383e-05, "loss": 5.3683, "step": 30},
- {"epoch": 0.12, "learning_rate": 7.286676646706586e-05, "loss": 4.3219, "step": 40},
- {"epoch": 0.15, "learning_rate": 7.17440119760479e-05, "loss": 3.7182, "step": 50},
- {"epoch": 0.15, "eval_loss": 3.836604595184326, "eval_runtime": 133.4846, "eval_samples_per_second": 34.611, "eval_steps_per_second": 4.33, "eval_wer": 1.0, "step": 50},
- {"epoch": 0.17, "learning_rate": 7.062125748502993e-05, "loss": 3.478, "step": 60},
- {"epoch": 0.2, "learning_rate": 6.949850299401197e-05, "loss": 3.4492, "step": 70},
- {"epoch": 0.23, "learning_rate": 6.837574850299401e-05, "loss": 3.3928, "step": 80},
- {"epoch": 0.26, "learning_rate": 6.725299401197604e-05, "loss": 3.3183, "step": 90},
- {"epoch": 0.29, "learning_rate": 6.613023952095809e-05, "loss": 3.2075, "step": 100},
- {"epoch": 0.29, "eval_loss": 3.258362293243408, "eval_runtime": 126.6078, "eval_samples_per_second": 36.491, "eval_steps_per_second": 4.565, "eval_wer": 1.0, "step": 100},
- {"epoch": 0.32, "learning_rate": 6.500748502994012e-05, "loss": 3.14, "step": 110},
- {"epoch": 0.35, "learning_rate": 6.388473053892215e-05, "loss": 3.1281, "step": 120},
- {"epoch": 0.38, "learning_rate": 6.276197604790418e-05, "loss": 3.0987, "step": 130},
- {"epoch": 0.41, "learning_rate": 6.163922155688622e-05, "loss": 3.1003, "step": 140},
- {"epoch": 0.44, "learning_rate": 6.0516467065868256e-05, "loss": 3.0922, "step": 150},
- {"epoch": 0.44, "eval_loss": 3.127869129180908, "eval_runtime": 126.3837, "eval_samples_per_second": 36.555, "eval_steps_per_second": 4.573, "eval_wer": 1.0, "step": 150},
- {"epoch": 0.46, "learning_rate": 5.9393712574850293e-05, "loss": 3.0588, "step": 160},
- {"epoch": 0.49, "learning_rate": 5.827095808383233e-05, "loss": 3.0477, "step": 170},
- {"epoch": 0.52, "learning_rate": 5.714820359281436e-05, "loss": 3.045, "step": 180},
- {"epoch": 0.55, "learning_rate": 5.602544910179641e-05, "loss": 3.0439, "step": 190},
- {"epoch": 0.58, "learning_rate": 5.490269461077844e-05, "loss": 3.0846, "step": 200},
- {"epoch": 0.58, "eval_loss": 3.079519271850586, "eval_runtime": 125.7215, "eval_samples_per_second": 36.748, "eval_steps_per_second": 4.597, "eval_wer": 1.0, "step": 200},
- {"epoch": 0.61, "learning_rate": 5.3779940119760477e-05, "loss": 3.0512, "step": 210},
- {"epoch": 0.64, "learning_rate": 5.265718562874251e-05, "loss": 3.0143, "step": 220},
- {"epoch": 0.67, "learning_rate": 5.1534431137724546e-05, "loss": 3.0387, "step": 230},
- {"epoch": 0.7, "learning_rate": 5.0411676646706584e-05, "loss": 3.0311, "step": 240},
- {"epoch": 0.73, "learning_rate": 4.9288922155688615e-05, "loss": 3.0417, "step": 250},
- {"epoch": 0.73, "eval_loss": 3.069390058517456, "eval_runtime": 125.7339, "eval_samples_per_second": 36.744, "eval_steps_per_second": 4.597, "eval_wer": 1.0, "step": 250},
- {"epoch": 0.75, "learning_rate": 4.816616766467066e-05, "loss": 3.0219, "step": 260},
- {"epoch": 0.78, "learning_rate": 4.704341317365269e-05, "loss": 3.0194, "step": 270},
- {"epoch": 0.81, "learning_rate": 4.592065868263473e-05, "loss": 2.9974, "step": 280},
- {"epoch": 0.84, "learning_rate": 4.479790419161676e-05, "loss": 2.9996, "step": 290},
- {"epoch": 0.87, "learning_rate": 4.36751497005988e-05, "loss": 3.0016, "step": 300},
- {"epoch": 0.87, "eval_loss": 3.0347490310668945, "eval_runtime": 132.3039, "eval_samples_per_second": 34.92, "eval_steps_per_second": 4.369, "eval_wer": 1.0, "step": 300},
- {"epoch": 0.9, "learning_rate": 4.255239520958083e-05, "loss": 3.0052, "step": 310},
- {"epoch": 0.93, "learning_rate": 4.142964071856287e-05, "loss": 2.9826, "step": 320},
- {"epoch": 0.96, "learning_rate": 4.030688622754491e-05, "loss": 2.9747, "step": 330},
- {"epoch": 0.99, "learning_rate": 3.918413173652694e-05, "loss": 2.9617, "step": 340},
- {"epoch": 1.02, "learning_rate": 3.806137724550898e-05, "loss": 3.2053, "step": 350},
- {"epoch": 1.02, "eval_loss": 2.984886407852173, "eval_runtime": 149.1508, "eval_samples_per_second": 30.975, "eval_steps_per_second": 3.875, "eval_wer": 1.0, "step": 350},
- {"epoch": 1.05, "learning_rate": 3.693862275449102e-05, "loss": 2.9665, "step": 360},
- {"epoch": 1.08, "learning_rate": 3.581586826347305e-05, "loss": 2.9641, "step": 370},
- {"epoch": 1.1, "learning_rate": 3.469311377245509e-05, "loss": 2.9484, "step": 380},
- {"epoch": 1.13, "learning_rate": 3.3570359281437126e-05, "loss": 2.9494, "step": 390},
- {"epoch": 1.16, "learning_rate": 3.244760479041916e-05, "loss": 2.9698, "step": 400},
- {"epoch": 1.16, "eval_loss": 2.989494562149048, "eval_runtime": 150.6903, "eval_samples_per_second": 30.659, "eval_steps_per_second": 3.836, "eval_wer": 1.0, "step": 400},
- {"epoch": 1.19, "learning_rate": 3.1324850299401195e-05, "loss": 2.9664, "step": 410},
- {"epoch": 1.22, "learning_rate": 3.020209580838323e-05, "loss": 2.9494, "step": 420},
- {"epoch": 1.25, "learning_rate": 2.9079341317365265e-05, "loss": 2.935, "step": 430},
- {"epoch": 1.28, "learning_rate": 2.7956586826347306e-05, "loss": 2.9397, "step": 440},
- {"epoch": 1.31, "learning_rate": 2.683383233532934e-05, "loss": 2.9485, "step": 450},
- {"epoch": 1.31, "eval_loss": 2.9584460258483887, "eval_runtime": 140.6358, "eval_samples_per_second": 32.851, "eval_steps_per_second": 4.11, "eval_wer": 1.0, "step": 450}
- ],
- "max_steps": 688,
- "num_train_epochs": 2,
- "total_flos": 1.336010418574825e+18,
- "trial_name": null,
- "trial_params": null
- }
{checkpoint-500 → checkpoint-5}/config.json RENAMED
@@ -1,6 +1,6 @@
  {
- "_name_or_path": "facebook/wav2vec2-xls-r-300m",
- "activation_dropout": 0.1,
+ "_name_or_path": "hf-test/xls-r-dummy",
+ "activation_dropout": 0.0,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
@@ -11,35 +11,23 @@
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
- "codevector_dim": 768,
+ "codevector_dim": 256,
  "contrastive_logits_temperature": 0.1,
- "conv_bias": true,
+ "conv_bias": false,
  "conv_dim": [
- 512,
- 512,
- 512,
- 512,
- 512,
- 512,
- 512
+ 32,
+ 32,
+ 32
  ],
  "conv_kernel": [
- 10,
- 3,
- 3,
- 3,
- 3,
- 2,
- 2
+ 8,
+ 8,
+ 8
  ],
  "conv_stride": [
- 5,
- 2,
- 2,
- 2,
- 2,
- 2,
- 2
+ 4,
+ 4,
+ 4
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
@@ -54,30 +42,31 @@
  "final_dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_dropout": 0.0,
- "hidden_size": 1024,
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 16,
  "initializer_range": 0.02,
- "intermediate_size": 4096,
+ "intermediate_size": 20,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.0,
- "mask_feature_length": 64,
+ "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
- "mask_feature_prob": 0.25,
+ "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
- "mask_time_prob": 0.75,
+ "mask_time_prob": 0.05,
  "model_type": "wav2vec2",
  "num_adapter_layers": 3,
- "num_attention_heads": 16,
+ "num_attention_heads": 2,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
- "num_conv_pos_embedding_groups": 16,
- "num_conv_pos_embeddings": 128,
- "num_feat_extract_layers": 7,
- "num_hidden_layers": 24,
- "num_negatives": 100,
- "output_hidden_size": 1024,
- "pad_token_id": 34,
- "proj_codevector_dim": 768,
+ "num_conv_pos_embedding_groups": 2,
+ "num_conv_pos_embeddings": 16,
+ "num_feat_extract_layers": 3,
+ "num_hidden_layers": 4,
+ "num_negatives": 10,
+ "output_hidden_size": 16,
+ "pad_token_id": 50,
+ "proj_codevector_dim": 256,
  "tdnn_dilation": [
  1,
  2,
@@ -102,6 +91,6 @@
  "torch_dtype": "float32",
  "transformers_version": "4.16.0.dev0",
  "use_weighted_layer_sum": false,
- "vocab_size": 37,
+ "vocab_size": 53,
  "xvector_output_dim": 512
  }
checkpoint-400/training_args.bin → checkpoint-5/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:77806a16ec6ef209c8a5c1c085159a0bc8e45c23f2d143f2c13e01527f13b5b2
- size 2991
+ oid sha256:a245c50c71ce92148ecd921cac7789c454842de546c72d14ed7db3cfb2368f57
+ size 130385
{checkpoint-400 → checkpoint-5}/preprocessor_config.json RENAMED
@@ -3,7 +3,7 @@
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
- "padding_value": 0,
- "return_attention_mask": true,
+ "padding_value": 0.0,
+ "return_attention_mask": false,
  "sampling_rate": 16000
  }
checkpoint-400/scaler.pt → checkpoint-5/pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3828e6a0e34e076fa271e3dd1c08f47dc96711f380d9585d89de05befff54169
- size 559
+ oid sha256:1f707ecc79ae941e56ffdb7641b649a684ab4c49c79e26a5c0e388853b9c3416
+ size 143910
{checkpoint-400 → checkpoint-5}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a426a909abceadbfb6682c295322fafea2b96fc77d190014718f2a843f386bc4
+ oid sha256:ed2ba2d62f4307f8e3a73e07e52e7c639a10c1c25d1552bde4eee08c6914110c
  size 14567
{checkpoint-500 → checkpoint-5}/scaler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0c8bceadebe118ff459b01a775a8e9b38a6b8302c162d022f78d3646163e6486
+ oid sha256:9a648873cde1f456464b7741cbb832250d15acff5f65edbdbdecb0e392b63153
  size 559
{checkpoint-400 → checkpoint-5}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b73abf21b39473fffe8d985c5918328870ddc7f68ec8e4b77b7e7ea44506a1b1
+ oid sha256:1570f1540bbb5840ac5e01c84d0494b5fa441945dfd0c990d275c242d4173037
  size 623
checkpoint-5/trainer_state.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.014204545454545454,
+ "global_step": 5,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [],
+ "max_steps": 10,
+ "num_train_epochs": 1,
+ "total_flos": 273921937920.0,
+ "trial_name": null,
+ "trial_params": null
+ }
{checkpoint-450 → checkpoint-5}/training_args.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:77806a16ec6ef209c8a5c1c085159a0bc8e45c23f2d143f2c13e01527f13b5b2
+ oid sha256:13f03d8c8ae9bd8496ddb1b6ef8d282d15b54e9aee2f515f000a57e9ac24d7c2
  size 2991
checkpoint-500/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2dbd398c0993452243cb7df79d58793489c5b41d1a1000e547261b81959e2f45
- size 2490362385
checkpoint-500/preprocessor_config.json DELETED
@@ -1,9 +0,0 @@
- {
- "do_normalize": true,
- "feature_extractor_type": "Wav2Vec2FeatureExtractor",
- "feature_size": 1,
- "padding_side": "right",
- "padding_value": 0,
- "return_attention_mask": true,
- "sampling_rate": 16000
- }
checkpoint-500/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ed159635bd947770842e25d915d8157fd198d2f06d5db2476c6663627e3beee7
- size 1262075377
checkpoint-500/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:32b82ac75b07e68dc3bc90e76f55a339f64dce724d87a9ae3c69ee46df441867
- size 14503
checkpoint-500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:15019bcd1c8fc8b1bd39c46d9e1196c2fa76648918a0024eb84229f57debcf7e
- size 623
checkpoint-500/trainer_state.json DELETED
@@ -1,55 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 1.4525018129079044,
- "global_step": 500,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {"epoch": 0.29, "learning_rate": 6.613023952095809e-05, "loss": 5.1206, "step": 100},
- {"epoch": 0.58, "learning_rate": 5.490269461077844e-05, "loss": 3.0901, "step": 200},
- {"epoch": 0.87, "learning_rate": 4.36751497005988e-05, "loss": 3.0224, "step": 300},
- {"epoch": 1.16, "learning_rate": 3.244760479041916e-05, "loss": 2.9922, "step": 400},
- {"epoch": 1.45, "learning_rate": 2.1220059880239517e-05, "loss": 2.9357, "step": 500},
- {"epoch": 1.45, "eval_loss": 2.9458744525909424, "eval_runtime": 138.8724, "eval_samples_per_second": 33.268, "eval_steps_per_second": 4.162, "eval_wer": 1.0, "step": 500}
- ],
- "max_steps": 688,
- "num_train_epochs": 2,
- "total_flos": 1.4827194756605722e+18,
- "trial_name": null,
- "trial_params": null
- }
checkpoint-500/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:07c8daeeea1ded5d5b75ab1c6033b9bed25c9ac1f192a365842399932683cfcc
- size 2991
config.json CHANGED
@@ -1,6 +1,6 @@
  {
- "_name_or_path": "facebook/wav2vec2-xls-r-300m",
- "activation_dropout": 0.1,
+ "_name_or_path": "hf-test/xls-r-dummy",
+ "activation_dropout": 0.0,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
@@ -11,35 +11,23 @@
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
- "codevector_dim": 768,
+ "codevector_dim": 256,
  "contrastive_logits_temperature": 0.1,
- "conv_bias": true,
+ "conv_bias": false,
  "conv_dim": [
- 512,
- 512,
- 512,
- 512,
- 512,
- 512,
- 512
+ 32,
+ 32,
+ 32
  ],
  "conv_kernel": [
- 10,
- 3,
- 3,
- 3,
- 3,
- 2,
- 2
+ 8,
+ 8,
+ 8
  ],
  "conv_stride": [
- 5,
- 2,
- 2,
- 2,
- 2,
- 2,
- 2
+ 4,
+ 4,
+ 4
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
@@ -54,30 +42,31 @@
  "final_dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_dropout": 0.0,
- "hidden_size": 1024,
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 16,
  "initializer_range": 0.02,
- "intermediate_size": 4096,
+ "intermediate_size": 20,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.0,
- "mask_feature_length": 64,
+ "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
- "mask_feature_prob": 0.25,
+ "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
- "mask_time_prob": 0.75,
+ "mask_time_prob": 0.05,
  "model_type": "wav2vec2",
  "num_adapter_layers": 3,
- "num_attention_heads": 16,
+ "num_attention_heads": 2,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
- "num_conv_pos_embedding_groups": 16,
- "num_conv_pos_embeddings": 128,
- "num_feat_extract_layers": 7,
- "num_hidden_layers": 24,
- "num_negatives": 100,
- "output_hidden_size": 1024,
- "pad_token_id": 34,
- "proj_codevector_dim": 768,
+ "num_conv_pos_embedding_groups": 2,
+ "num_conv_pos_embeddings": 16,
+ "num_feat_extract_layers": 3,
+ "num_hidden_layers": 4,
+ "num_negatives": 10,
+ "output_hidden_size": 16,
+ "pad_token_id": 50,
+ "proj_codevector_dim": 256,
  "tdnn_dilation": [
  1,
  2,
@@ -102,6 +91,6 @@
  "torch_dtype": "float32",
  "transformers_version": "4.16.0.dev0",
  "use_weighted_layer_sum": false,
- "vocab_size": 37,
+ "vocab_size": 53,
  "xvector_output_dim": 512
  }
eval_results.json DELETED
@@ -1,9 +0,0 @@
- {
- "epoch": 2.0,
- "eval_loss": 2.91414213180542,
- "eval_runtime": 133.9783,
- "eval_samples": 4620,
- "eval_samples_per_second": 34.483,
- "eval_steps_per_second": 4.314,
- "eval_wer": 1.0
- }
preprocessor_config.json CHANGED
@@ -3,7 +3,7 @@
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
- "padding_value": 0,
- "return_attention_mask": true,
+ "padding_value": 0.0,
+ "return_attention_mask": false,
  "sampling_rate": 16000
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5c01bc756f9d54db8c90d48e01252abf1115ffe23b3c7297aa9b48389b4e9132
- size 1262075377
+ oid sha256:1f707ecc79ae941e56ffdb7641b649a684ab4c49c79e26a5c0e388853b9c3416
+ size 143910
run-dummy-ab-gpu.sh ADDED
@@ -0,0 +1,22 @@
+ python run_speech_recognition_ctc.py \
+ --dataset_name="mozilla-foundation/common_voice_7_0" \
+ --model_name_or_path="hf-test/xls-r-dummy" \
+ --dataset_config_name="ab" \
+ --output_dir="./" \
+ --overwrite_output_dir \
+ --max_steps="10" \
+ --per_device_train_batch_size="2" \
+ --learning_rate="3e-4" \
+ --save_total_limit="1" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --length_column_name="input_length" \
+ --save_steps="5" \
+ --layerdrop="0.0" \
+ --freeze_feature_encoder \
+ --gradient_checkpointing \
+ --fp16 \
+ --group_by_length \
+ --push_to_hub \
+ --use_auth_token \
+ --do_train --do_eval
run-dummy-sv-gpu.sh CHANGED
@@ -1,7 +1,7 @@
  python run_speech_recognition_ctc.py \
  --dataset_name="mozilla-foundation/common_voice_7_0" \
- --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
- --dataset_config_name="sv-SE" \
+ --model_name_or_path="hf-test/xls-r-dummy" \
+ --dataset_config_name="ab" \
  --output_dir="./" \
  --overwrite_output_dir \
  --max_steps="10" \