anjulRajendraSharma committed on
Commit ab5465e
Parent: 31858f4

upload model

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. README.md +70 -0
  2. added_tokens.json +1 -0
  3. all_results.json +14 -0
  4. checkpoint-2500/config.json +121 -0
  5. checkpoint-2500/optimizer.pt +3 -0
  6. checkpoint-2500/preprocessor_config.json +9 -0
  7. checkpoint-2500/pytorch_model.bin +3 -0
  8. checkpoint-2500/rng_state.pth +3 -0
  9. checkpoint-2500/scaler.pt +3 -0
  10. checkpoint-2500/scheduler.pt +3 -0
  11. checkpoint-2500/trainer_state.json +0 -0
  12. checkpoint-2500/training_args.bin +3 -0
  13. checkpoint-3000/config.json +121 -0
  14. checkpoint-3000/optimizer.pt +3 -0
  15. checkpoint-3000/preprocessor_config.json +9 -0
  16. checkpoint-3000/pytorch_model.bin +3 -0
  17. checkpoint-3000/rng_state.pth +3 -0
  18. checkpoint-3000/scaler.pt +3 -0
  19. checkpoint-3000/scheduler.pt +3 -0
  20. checkpoint-3000/trainer_state.json +0 -0
  21. checkpoint-3000/training_args.bin +3 -0
  22. checkpoint-3500/config.json +121 -0
  23. checkpoint-3500/optimizer.pt +3 -0
  24. checkpoint-3500/preprocessor_config.json +9 -0
  25. checkpoint-3500/pytorch_model.bin +3 -0
  26. checkpoint-3500/rng_state.pth +3 -0
  27. checkpoint-3500/scaler.pt +3 -0
  28. checkpoint-3500/scheduler.pt +3 -0
  29. checkpoint-3500/trainer_state.json +0 -0
  30. checkpoint-3500/training_args.bin +3 -0
  31. config.json +121 -0
  32. eval_results.json +9 -0
  33. preprocessor_config.json +9 -0
  34. pytorch_model.bin +3 -0
  35. runs/Jan28_04-48-55_d2987f7d2cf0/1643345395.002263/events.out.tfevents.1643345395.d2987f7d2cf0.1792174.1 +3 -0
  36. runs/Jan28_04-48-55_d2987f7d2cf0/events.out.tfevents.1643345394.d2987f7d2cf0.1792174.0 +3 -0
  37. runs/Jan28_15-46-12_d2987f7d2cf0/1643384837.416991/events.out.tfevents.1643384837.d2987f7d2cf0.2385063.1 +3 -0
  38. runs/Jan28_15-46-12_d2987f7d2cf0/events.out.tfevents.1643384837.d2987f7d2cf0.2385063.0 +3 -0
  39. runs/Jan28_15-52-48_d2987f7d2cf0/1643385218.1226718/events.out.tfevents.1643385218.d2987f7d2cf0.2386553.1 +3 -0
  40. runs/Jan28_15-52-48_d2987f7d2cf0/events.out.tfevents.1643385218.d2987f7d2cf0.2386553.0 +3 -0
  41. runs/Jan28_16-07-07_d2987f7d2cf0/1643386076.9584827/events.out.tfevents.1643386076.d2987f7d2cf0.2386872.1 +3 -0
  42. runs/Jan28_16-07-07_d2987f7d2cf0/events.out.tfevents.1643386076.d2987f7d2cf0.2386872.0 +3 -0
  43. runs/Jan28_16-12-12_d2987f7d2cf0/1643386381.6106/events.out.tfevents.1643386381.d2987f7d2cf0.2388587.1 +3 -0
  44. runs/Jan28_16-12-12_d2987f7d2cf0/events.out.tfevents.1643386381.d2987f7d2cf0.2388587.0 +3 -0
  45. runs/Jan28_16-12-12_d2987f7d2cf0/events.out.tfevents.1643386687.d2987f7d2cf0.2388587.2 +3 -0
  46. special_tokens_map.json +1 -0
  47. tokenizer_config.json +1 -0
  48. train_results.json +8 -0
  49. trainer_state.json +0 -0
  50. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ tags:
+ - automatic-speech-recognition
+ - librispeech_asr
+ - generated_from_trainer
+ model-index:
+ - name: wavlm-libri-clean-100h-base
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # wavlm-libri-clean-100h-base
+
+ This model is a fine-tuned version of [microsoft/wavlm-base](https://huggingface.co/microsoft/wavlm-base) on the LIBRISPEECH_ASR - CLEAN dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0955
+ - Wer: 0.0773
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - num_epochs: 1.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 2.8664 | 0.17 | 300 | 2.8439 | 1.0 |
+ | 0.5009 | 0.34 | 600 | 0.2709 | 0.2162 |
+ | 0.2056 | 0.5 | 900 | 0.1934 | 0.1602 |
+ | 0.1648 | 0.67 | 1200 | 0.1576 | 0.1306 |
+ | 0.1922 | 0.84 | 1500 | 0.1358 | 0.1114 |
+ | 0.093 | 1.01 | 1800 | 0.1277 | 0.1035 |
+ | 0.0652 | 1.18 | 2100 | 0.1251 | 0.1005 |
+ | 0.0848 | 1.35 | 2400 | 0.1188 | 0.0964 |
+ | 0.0706 | 1.51 | 2700 | 0.1091 | 0.0905 |
+ | 0.0846 | 1.68 | 3000 | 0.1018 | 0.0840 |
+ | 0.0684 | 1.85 | 3300 | 0.0978 | 0.0809 |
+
+
+ ### Framework versions
+
+ - Transformers 4.15.0
+ - Pytorch 1.9.1
+ - Datasets 1.18.0
+ - Tokenizers 0.10.3
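
The model card above describes a CTC fine-tune of `microsoft/wavlm-base` on LibriSpeech clean-100h. As a minimal usage sketch (the repo id is assumed from the model name in this commit, and `example.wav` is a placeholder for any 16 kHz mono recording), inference with the uploaded weights could look like this:

```python
import torch
import torchaudio
from transformers import Wav2Vec2Processor, WavLMForCTC

# Assumed repo id, inferred from the model name in the card above;
# adjust the namespace if the model lives elsewhere.
MODEL_ID = "anjulRajendraSharma/wavlm-libri-clean-100h-base"

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = WavLMForCTC.from_pretrained(MODEL_ID)
model.eval()

# preprocessor_config.json in this commit expects 16 kHz mono audio.
waveform, sample_rate = torchaudio.load("example.wav")  # placeholder path
if sample_rate != 16000:
    waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)

inputs = processor(waveform.squeeze(0).numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding with the Wav2Vec2CTCTokenizer shipped in this repo.
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```

Greedy argmax decoding is how CTC eval WER is typically measured in the Transformers example scripts; no language model is part of this upload.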
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 29, "</s>": 30}
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 1.96,
+ "eval_loss": 0.09545929729938507,
+ "eval_runtime": 297.6826,
+ "eval_samples": 2642,
+ "eval_samples_per_second": 8.875,
+ "eval_steps_per_second": 0.558,
+ "eval_wer": 0.07727092423641412,
+ "train_loss": 0.0,
+ "train_runtime": 7.6753,
+ "train_samples": 28538,
+ "train_samples_per_second": 3718.173,
+ "train_steps_per_second": 232.435
+ }
checkpoint-2500/config.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "_name_or_path": "microsoft/wavlm-base",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "WavLMForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "freeze_feat_extract_train": true,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "max_bucket_distance": 800,
+ "model_type": "wavlm",
+ "no_mask_channel_overlap": false,
+ "no_mask_time_overlap": false,
+ "num_adapter_layers": 3,
+ "num_attention_heads": 12,
+ "num_buckets": 320,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_ctc_classes": 80,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "output_hidden_size": 768,
+ "pad_token_id": 28,
+ "proj_codevector_dim": 256,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 31,
+ "xvector_output_dim": 512
+ }
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9fb791143dfbb3b1cf1d14e46d62e481c6e2c78ea34af1bf60acc3c3e01cc95c
+ size 721780605
checkpoint-2500/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": false,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
checkpoint-2500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a18ea5c1191e01820b5482ebe5e73a366dc76e3747a08a397f2ac22a13895cf5
+ size 377719629
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fff66da33640d4a701404dc80c65deb581e473dc3806f2431e25e6d869a16613
+ size 17563
checkpoint-2500/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d7885acd14de8af3583d1c613c429fcc523623c0fbb1d5873894df5cfccd602
+ size 559
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:addfb91bf467aca28586b06bb3086e02b38295375b6e572a9817c013856b8ef7
+ size 623
checkpoint-2500/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87ac8ac9c168668ea9d4f2d6887a03ad67003a09c72bd20b80b257e71770101f
+ size 2991
checkpoint-3000/config.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "_name_or_path": "microsoft/wavlm-base",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "WavLMForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "freeze_feat_extract_train": true,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "max_bucket_distance": 800,
+ "model_type": "wavlm",
+ "no_mask_channel_overlap": false,
+ "no_mask_time_overlap": false,
+ "num_adapter_layers": 3,
+ "num_attention_heads": 12,
+ "num_buckets": 320,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_ctc_classes": 80,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "output_hidden_size": 768,
+ "pad_token_id": 28,
+ "proj_codevector_dim": 256,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 31,
+ "xvector_output_dim": 512
+ }
checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7495ea2d4207f8ff07a18a9aa6ce6c38a13c4962eec5355b1a81fbdf8f37c8c7
+ size 721780605
checkpoint-3000/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": false,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
checkpoint-3000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2259d6b6a9a2ecdafb79c75724a6b769fb07230e7959bd0aa8f8527069d58524
+ size 377719629
checkpoint-3000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e57e0ea02d5be7051bebc2ee12a6143a760a7dbb68d231c38f991a53c5f5fa2
+ size 17627
checkpoint-3000/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:093dbaa048280b9a3992fd16ffeff174077217d2aa672399db12a99b1447ff0e
+ size 559
checkpoint-3000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3aa567110332372f26c1efc0590179f8f95f00ec7e01e1691c668f64711cafc3
+ size 623
checkpoint-3000/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87ac8ac9c168668ea9d4f2d6887a03ad67003a09c72bd20b80b257e71770101f
+ size 2991
checkpoint-3500/config.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "_name_or_path": "microsoft/wavlm-base",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "WavLMForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "freeze_feat_extract_train": true,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "max_bucket_distance": 800,
+ "model_type": "wavlm",
+ "no_mask_channel_overlap": false,
+ "no_mask_time_overlap": false,
+ "num_adapter_layers": 3,
+ "num_attention_heads": 12,
+ "num_buckets": 320,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_ctc_classes": 80,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "output_hidden_size": 768,
+ "pad_token_id": 28,
+ "proj_codevector_dim": 256,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 31,
+ "xvector_output_dim": 512
+ }
checkpoint-3500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a84dbc7ec17c35e0eb9855ae267a22d87d23d66b1823b63ab4ec51920f19ebe4
+ size 721780605
checkpoint-3500/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": false,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
checkpoint-3500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee9330b00c16a2b30f23e09ecc63408058e193e50d7838447c1c87a0bc9f0c52
+ size 377719629
checkpoint-3500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a15658bb0935e17efb513e7b6f3d84ee2fd4fb12aac29d62200444ab6afa890b
+ size 17627
checkpoint-3500/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b853338b3fe0587db6bc86d6a7c83b41072523465a2431483252763bf4fe51a
+ size 559
checkpoint-3500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05f83b5b46a684f6ed4abd343a4042beecee67985c8385de39e6b1490461106d
+ size 623
checkpoint-3500/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-3500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87ac8ac9c168668ea9d4f2d6887a03ad67003a09c72bd20b80b257e71770101f
+ size 2991
config.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "_name_or_path": "microsoft/wavlm-base",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "WavLMForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "freeze_feat_extract_train": true,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "max_bucket_distance": 800,
+ "model_type": "wavlm",
+ "no_mask_channel_overlap": false,
+ "no_mask_time_overlap": false,
+ "num_adapter_layers": 3,
+ "num_attention_heads": 12,
+ "num_buckets": 320,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_ctc_classes": 80,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "output_hidden_size": 768,
+ "pad_token_id": 28,
+ "proj_codevector_dim": 256,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 31,
+ "xvector_output_dim": 512
+ }
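
The config above is what `WavLMForCTC` is instantiated from. A minimal sketch (not part of this upload) of rebuilding the architecture from a local copy of this `config.json`, assuming the file has been downloaded next to the script:

```python
from transformers import WavLMConfig, WavLMForCTC

# Load the exact configuration shown above (vocab_size=31, pad_token_id=28, CTC head).
config = WavLMConfig.from_pretrained("./config.json")
model = WavLMForCTC(config)  # randomly initialised; the weights live in pytorch_model.bin

# Roughly 94M float32 parameters, consistent with the ~378 MB pytorch_model.bin below.
print(sum(p.numel() for p in model.parameters()))
```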
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.96,
+ "eval_loss": 0.09545929729938507,
+ "eval_runtime": 297.6826,
+ "eval_samples": 2642,
+ "eval_samples_per_second": 8.875,
+ "eval_steps_per_second": 0.558,
+ "eval_wer": 0.07727092423641412
+ }
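
`eval_wer` above is a word error rate (about 7.73%). For reference, a small sketch of how such a score is computed with the `wer` metric from `datasets` 1.18 (the version listed in the model card; it requires `jiwer` to be installed):

```python
from datasets import load_metric

wer_metric = load_metric("wer")  # thin wrapper around jiwer

# Toy strings only; real evaluation would use the decoded LibriSpeech transcripts.
predictions = ["the cat sat on the mat"]
references = ["the cat sat on a mat"]
print(wer_metric.compute(predictions=predictions, references=references))  # 1 error / 6 words
```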
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": false,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee9330b00c16a2b30f23e09ecc63408058e193e50d7838447c1c87a0bc9f0c52
+ size 377719629
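
The three lines above are a Git LFS pointer, not the weights themselves; the ~378 MB checkpoint is stored in LFS. A hedged sketch of fetching it programmatically (the repo id is assumed, as before):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and returns the local cache path of the real file.
local_path = hf_hub_download(
    repo_id="anjulRajendraSharma/wavlm-libri-clean-100h-base",  # assumed repo id
    filename="pytorch_model.bin",
)
print(local_path)
```

Cloning the repository with `git lfs` installed would fetch the same file.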
runs/Jan28_04-48-55_d2987f7d2cf0/1643345395.002263/events.out.tfevents.1643345395.d2987f7d2cf0.1792174.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:752f881e0df06e3a56214f5e330f5becc9539b54ab480a80ff6480bfd9907a7e
+ size 4784
runs/Jan28_04-48-55_d2987f7d2cf0/events.out.tfevents.1643345394.d2987f7d2cf0.1792174.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94db18774842b14dccc30f96f1106f980c2650c1e75a1e2bd0ad3385c6df6a51
+ size 585230
runs/Jan28_15-46-12_d2987f7d2cf0/1643384837.416991/events.out.tfevents.1643384837.d2987f7d2cf0.2385063.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d4fcf9aeb6de595206114cf09c7ba14feb8ac4ec76f3e265db909d17e9ccde1
+ size 4784
runs/Jan28_15-46-12_d2987f7d2cf0/events.out.tfevents.1643384837.d2987f7d2cf0.2385063.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d18a32cf6885d879ed343a8460fbfc114a145e1303b7218da2e19782e9c6678
+ size 7290
runs/Jan28_15-52-48_d2987f7d2cf0/1643385218.1226718/events.out.tfevents.1643385218.d2987f7d2cf0.2386553.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72c57ccb1341a2be27b031be484ba4475f953003e25ed94fa4300305541f80ab
+ size 4784
runs/Jan28_15-52-48_d2987f7d2cf0/events.out.tfevents.1643385218.d2987f7d2cf0.2386553.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6fba388c2f20fb8ee64278a063e39e787102e19fe10f11cce935eb6ae394626
+ size 5135
runs/Jan28_16-07-07_d2987f7d2cf0/1643386076.9584827/events.out.tfevents.1643386076.d2987f7d2cf0.2386872.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e21ce31da8c61a13df25e95852ce667438a496a94af0f95f1feafb1cb3767e5
+ size 4784
runs/Jan28_16-07-07_d2987f7d2cf0/events.out.tfevents.1643386076.d2987f7d2cf0.2386872.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dea03685519d38aedc95d42b2abe61225d14c91dc6227e52d7fa22db6c202d35
+ size 7489
runs/Jan28_16-12-12_d2987f7d2cf0/1643386381.6106/events.out.tfevents.1643386381.d2987f7d2cf0.2388587.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:167a167f20cd827b216614aba78a1dc43407d32ce19bf6ed426be73cfd87e21d
+ size 4784
runs/Jan28_16-12-12_d2987f7d2cf0/events.out.tfevents.1643386381.d2987f7d2cf0.2388587.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:344870ca309d912e401fb72795fcf15744de1ca54ba275d7b263cfbb437c1778
+ size 5488
runs/Jan28_16-12-12_d2987f7d2cf0/events.out.tfevents.1643386687.d2987f7d2cf0.2388587.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:671838c8481001a2572b25babbe8de1075e800c5de8699537ec4f0c2cf547d41
+ size 358
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./wavlm-libri-clean-100h-base", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.96,
+ "train_loss": 0.0,
+ "train_runtime": 7.6753,
+ "train_samples": 28538,
+ "train_samples_per_second": 3718.173,
+ "train_steps_per_second": 232.435
+ }
trainer_state.json ADDED
The diff for this file is too large to render.
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a04b72fe215fc52f9c23c9f56ceb93f9b5f3d0dce3528061f5506af12028642c
+ size 2991