DewiBrynJones committed on
Commit 000db46
1 Parent(s): 43a8de5

Training in progress, step 500
README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ license: apache-2.0
+ base_model: facebook/wav2vec2-large-xlsr-53
+ tags:
+ - automatic-speech-recognition
+ - techiaith/banc-trawsgrifiadau-bangor
+ - generated_from_trainer
+ datasets:
+ - banc-trawsgrifiadau-bangor
+ metrics:
+ - wer
+ model-index:
+ - name: wav2vec2-xlsr-53-ft-btb-cy
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: TECHIAITH/BANC-TRAWSGRIFIADAU-BANGOR - CY
+       type: banc-trawsgrifiadau-bangor
+       config: cy
+       split: test
+       args: 'Config: cy, Training split: train, Eval split: test'
+     metrics:
+     - name: Wer
+       type: wer
+       value: 0.44248649327805
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # wav2vec2-xlsr-53-ft-btb-cy
+
+ This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the TECHIAITH/BANC-TRAWSGRIFIADAU-BANGOR - CY dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5992
+ - Wer: 0.4425
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 800
+ - training_steps: 5000
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer    |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 4.6574        | 1.41  | 500  | 1.0889          | 0.7293 |
+ | 1.0887        | 2.83  | 1000 | 0.5992          | 0.4425 |
+ | 0.7223        | 4.24  | 1500 | 0.7267          | 0.4631 |
+ | 2.1295        | 5.66  | 2000 | 3.0492          | 0.9984 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.1+cu121
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
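
The card above carries no usage snippet, so here is a minimal inference sketch. It assumes the checkpoint is published on the Hub under the `model-index` name, `DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cy` (inferred from `trainer_state.json` below), and `your_audio.wav` is a placeholder path:

```python
# Minimal sketch, not part of the commit: transcribe one file with this model.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cy"  # assumed Hub repo id
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id).eval()

# Load audio, downmix to mono, and resample to the 16 kHz rate that
# preprocessor_config.json (below) expects.
waveform, sample_rate = torchaudio.load("your_audio.wav")  # placeholder path
waveform = torchaudio.functional.resample(waveform.mean(dim=0), sample_rate, 16_000)

inputs = processor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

# Greedy CTC decode: argmax per frame, then let the processor collapse repeats.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```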
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "</s>": 52,
+   "<s>": 51
+ }
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 5.66,
+   "eval_loss": 0.5991869568824768,
+   "eval_runtime": 213.631,
+   "eval_samples": 5656,
+   "eval_samples_per_second": 26.476,
+   "eval_steps_per_second": 0.829,
+   "eval_wer": 0.44248649327805,
+   "train_loss": 1.8628226623535156,
+   "train_runtime": 6582.3574,
+   "train_samples": 22621,
+   "train_samples_per_second": 48.615,
+   "train_steps_per_second": 0.76
+ }
config.json ADDED
@@ -0,0 +1,116 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
+   "activation_dropout": 0.0,
+   "adapter_attn_dim": null,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 50,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 53,
+   "xvector_output_dim": 512
+ }
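
One detail worth pulling out of this config: the seven `conv_stride` values of the feature encoder multiply to the model's audio-to-frame downsampling factor. A small illustrative calculation, not part of the commit:

```python
# Illustrative: CTC output frame rate implied by conv_stride in config.json.
from math import prod

conv_stride = [5, 2, 2, 2, 2, 2, 2]   # from config.json above
sampling_rate = 16_000                # from preprocessor_config.json below

samples_per_frame = prod(conv_stride)       # 320 audio samples per encoder frame
print(samples_per_frame)                    # 320
print(sampling_rate / samples_per_frame)    # 50.0 -> one CTC frame every 20 ms
```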
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.66,
+   "eval_loss": 0.5991869568824768,
+   "eval_runtime": 213.631,
+   "eval_samples": 5656,
+   "eval_samples_per_second": 26.476,
+   "eval_steps_per_second": 0.829,
+   "eval_wer": 0.44248649327805
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c793a579aa1465e577413ca9c6efa256442715c012e2fd3f66b7f753f2821598
+ size 1262024780
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "processor_class": "Wav2Vec2Processor",
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
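
This config means inputs must be mono float waveforms at 16 kHz, which the feature extractor then zero-mean/unit-variance normalises (`do_normalize: true`). A quick sanity-check sketch with dummy audio, mirroring the settings in the file above:

```python
# Sketch: build a feature extractor with the settings from this file and
# confirm it normalises a dummy 1-second waveform.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)
audio = np.random.randn(16000).astype(np.float32)  # 1 s of dummy audio
batch = fe(audio, sampling_rate=16000, return_tensors="np")
print(batch.input_values.shape)          # (1, 16000)
print(float(batch.input_values.mean()))  # ~0.0 after normalisation
```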
runs/May10_07-12-18_98ffe85cdad5/events.out.tfevents.1715322135.98ffe85cdad5.29.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9957da7384dd8db295c4d33764cef8e719dc9673f6a9dc70783f94a128041d97
+ size 6436
runs/May10_07-37-48_617ab803152d/events.out.tfevents.1715323079.617ab803152d.31.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aac08b54ed83ac276d462939d0fead883a576e2ceb215f5f786ae3ee456f0d9c
+ size 6436
runs/May10_07-39-17_6b3e2401f443/events.out.tfevents.1715323166.6b3e2401f443.30.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb227836f642ba813cdfb5ee10d07ab35132c8cb967b67b28d9e24d222418897
+ size 6965
runs/Nov23_07-55-30_d403632c4022/events.out.tfevents.1700726396.d403632c4022.1864.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:854013c54e1875ae9900dfa4d32c0306c2a74404ec9bc38e7e3d20f03c3f7502
+ size 8387
runs/Nov23_07-55-30_d403632c4022/events.out.tfevents.1700733346.d403632c4022.1864.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd66ec1aeb90159573d9a7bbee2d957b8bfffbcb0e39cea89072d29383f0ca83
+ size 406
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "added_tokens_decoder": {
+     "49": {
+       "content": "[UNK]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "50": {
+       "content": "[PAD]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "51": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "52": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "processor_class": "Wav2Vec2Processor",
+   "replace_word_delimiter_char": " ",
+   "target_lang": null,
+   "tokenizer_class": "Wav2Vec2CTCTokenizer",
+   "unk_token": "[UNK]",
+   "word_delimiter_token": "|"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 5.66,
+   "train_loss": 1.8628226623535156,
+   "train_runtime": 6582.3574,
+   "train_samples": 22621,
+   "train_samples_per_second": 48.615,
+   "train_steps_per_second": 0.76
+ }
trainer_state.json ADDED
@@ -0,0 +1,94 @@
+ {
+   "best_metric": 0.5991869568824768,
+   "best_model_checkpoint": "/models/hfhub/DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cy/checkpoint-1000",
+   "epoch": 5.657708628005658,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.13,
+       "learning_rate": 0.00014812499999999998,
+       "loss": 4.6574,
+       "step": 400
+     },
+     {
+       "epoch": 1.41,
+       "eval_loss": 1.0889060497283936,
+       "eval_runtime": 233.5812,
+       "eval_samples_per_second": 24.214,
+       "eval_steps_per_second": 0.758,
+       "eval_wer": 0.7293001633371027,
+       "step": 500
+     },
+     {
+       "epoch": 2.26,
+       "learning_rate": 0.00029662499999999996,
+       "loss": 1.0887,
+       "step": 800
+     },
+     {
+       "epoch": 2.83,
+       "eval_loss": 0.5991869568824768,
+       "eval_runtime": 234.4544,
+       "eval_samples_per_second": 24.124,
+       "eval_steps_per_second": 0.755,
+       "eval_wer": 0.44248649327805,
+       "step": 1000
+     },
+     {
+       "epoch": 3.39,
+       "learning_rate": 0.0002722142857142857,
+       "loss": 0.7223,
+       "step": 1200
+     },
+     {
+       "epoch": 4.24,
+       "eval_loss": 0.726735532283783,
+       "eval_runtime": 213.9298,
+       "eval_samples_per_second": 26.439,
+       "eval_steps_per_second": 0.827,
+       "eval_wer": 0.46312350797838925,
+       "step": 1500
+     },
+     {
+       "epoch": 4.53,
+       "learning_rate": 0.0002439285714285714,
+       "loss": 0.7162,
+       "step": 1600
+     },
+     {
+       "epoch": 5.66,
+       "learning_rate": 0.00021564285714285712,
+       "loss": 2.1295,
+       "step": 2000
+     },
+     {
+       "epoch": 5.66,
+       "eval_loss": 3.0491700172424316,
+       "eval_runtime": 217.4025,
+       "eval_samples_per_second": 26.016,
+       "eval_steps_per_second": 0.814,
+       "eval_wer": 0.9983980399547682,
+       "step": 2000
+     },
+     {
+       "epoch": 5.66,
+       "step": 2000,
+       "total_flos": 1.5333464503060175e+19,
+       "train_loss": 1.8628226623535156,
+       "train_runtime": 6582.3574,
+       "train_samples_per_second": 48.615,
+       "train_steps_per_second": 0.76
+     }
+   ],
+   "logging_steps": 400,
+   "max_steps": 5000,
+   "num_train_epochs": 15,
+   "save_steps": 500,
+   "total_flos": 1.5333464503060175e+19,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11db91cc3e5d1dc8b26dae3f198d038e603fc93e95040bd002af19679206ad3b
+ size 5048
vocab.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "#": 1,
+   "'": 2,
+   "-": 3,
+   "@": 4,
+   "[PAD]": 50,
+   "[UNK]": 49,
+   "a": 5,
+   "b": 6,
+   "c": 7,
+   "d": 8,
+   "e": 9,
+   "f": 10,
+   "g": 11,
+   "h": 12,
+   "i": 13,
+   "j": 14,
+   "k": 15,
+   "l": 16,
+   "m": 17,
+   "n": 18,
+   "o": 19,
+   "p": 20,
+   "q": 21,
+   "r": 22,
+   "s": 23,
+   "t": 24,
+   "u": 25,
+   "v": 26,
+   "w": 27,
+   "x": 28,
+   "y": 29,
+   "z": 30,
+   "|": 0,
+   "~": 31,
+   "á": 32,
+   "â": 33,
+   "è": 34,
+   "é": 35,
+   "ê": 36,
+   "î": 37,
+   "ï": 38,
+   "ó": 39,
+   "ô": 40,
+   "ö": 41,
+   "û": 42,
+   "ü": 43,
+   "č": 44,
+   "ł": 45,
+   "ŵ": 46,
+   "ŷ": 47,
+   "ž": 48
+ }
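
The vocabulary is character-level: `|` (id 0) is the word delimiter and `[PAD]` (id 50) doubles as the CTC blank (`pad_token_id: 50` in config.json). A toy greedy-decode sketch over hand-written frame ids, just to illustrate the mapping; in practice `processor.batch_decode` does this for you:

```python
# Toy illustration: greedy CTC decoding with this vocab -- collapse repeats,
# drop the blank/[PAD] id (50), and turn "|" back into spaces.
import json

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
id_to_char = {i: c for c, i in vocab.items()}

def greedy_ctc_decode(ids, blank_id=50):
    out, prev = [], None
    for i in ids:
        if i != prev and i != blank_id:
            out.append(id_to_char[i])
        prev = i
    return "".join(out).replace("|", " ")

# Welsh "bore da" ("good morning") as CTC frames might emit it:
frames = [6, 6, 19, 22, 9, 50, 0, 8, 8, 50, 5]
print(greedy_ctc_decode(frames))  # -> "bore da"
```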