mrm8488 committed
Commit 87e6587
Parent: 96c8efc

Initial commit

config.json ADDED
@@ -0,0 +1,76 @@
+ {
+ "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
+ "activation_dropout": 0.0,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
+ "final_dropout": 0.0,
+ "gradient_checkpointing": true,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.1,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "model_type": "wav2vec2",
+ "num_attention_heads": 16,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "pad_token_id": 29,
+ "transformers_version": "4.5.0.dev0",
+ "vocab_size": 30
+ }
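The config above describes a CTC fine-tune of facebook/wav2vec2-large-xlsr-53: 24 transformer layers, hidden size 1024, a 30-entry vocabulary, and [PAD] (id 29) acting as the CTC blank. A minimal loading sketch with 🤗 Transformers; "model-dir" is a placeholder path or repo id for wherever the files in this commit live, not something stated in the commit itself:

```python
# Minimal sketch: load the checkpoint described by config.json.
# "model-dir" is a hypothetical path/repo id; point it at these files.
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_pretrained("model-dir")
assert config.model_type == "wav2vec2"
assert config.vocab_size == 30      # 28 characters + [UNK] + [PAD], see vocab.json
assert config.pad_token_id == 29    # [PAD] also serves as the CTC blank

model = Wav2Vec2ForCTC.from_pretrained("model-dir")
print(model.config.num_hidden_layers, model.config.hidden_size)  # 24, 1024
```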
preprocessor_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "do_normalize": true,
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
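These settings correspond to a Wav2Vec2FeatureExtractor that normalizes mono 16 kHz audio and returns an attention mask. A rough sketch, using a silent dummy waveform purely for illustration:

```python
# Sketch: build the feature extractor from the values in preprocessor_config.json.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence, placeholder input
inputs = feature_extractor(speech, sampling_rate=16000, padding=True, return_tensors="pt")
print(inputs.input_values.shape, inputs.attention_mask.shape)
```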
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b98f47bd4e856a0aba2d9c189725c0c8a895b2e4d32bd5cb1a154878af17d31
+ size 1262056855
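This is a Git LFS pointer, not the weights themselves; the actual pytorch_model.bin (~1.26 GB) is resolved when the LFS object is fetched. A small sketch for checking a downloaded copy against the oid and size above, assuming the file sits in the working directory:

```python
# Sketch: verify a local pytorch_model.bin against the Git LFS pointer above.
import hashlib
from pathlib import Path

EXPECTED_SHA256 = "5b98f47bd4e856a0aba2d9c189725c0c8a895b2e4d32bd5cb1a154878af17d31"
EXPECTED_SIZE = 1262056855

path = Path("pytorch_model.bin")  # assumed location of the downloaded weights
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
```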
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}
trainer_state.json ADDED
@@ -0,0 +1,254 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 28.93617021276596,
+ "global_step": 6800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.7,
+ "learning_rate": 0.00023999999999999998,
+ "loss": 4.3481,
+ "step": 400
+ },
+ {
+ "epoch": 1.7,
+ "eval_loss": 2.619910955429077,
+ "eval_runtime": 671.3151,
+ "eval_samples_per_second": 7.704,
+ "eval_wer": 0.99997385142379,
+ "step": 400
+ },
+ {
+ "epoch": 3.4,
+ "learning_rate": 0.00028625954198473283,
+ "loss": 0.5343,
+ "step": 800
+ },
+ {
+ "epoch": 3.4,
+ "eval_loss": 0.27097710967063904,
+ "eval_runtime": 691.1513,
+ "eval_samples_per_second": 7.483,
+ "eval_wer": 0.4792511047773449,
+ "step": 800
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 0.0002679389312977099,
+ "loss": 0.1541,
+ "step": 1200
+ },
+ {
+ "epoch": 5.11,
+ "eval_loss": 0.2556667923927307,
+ "eval_runtime": 706.9613,
+ "eval_samples_per_second": 7.316,
+ "eval_wer": 0.44368904113171037,
+ "step": 1200
+ },
+ {
+ "epoch": 6.81,
+ "learning_rate": 0.00024961832061068704,
+ "loss": 0.1049,
+ "step": 1600
+ },
+ {
+ "epoch": 6.81,
+ "eval_loss": 0.23636971414089203,
+ "eval_runtime": 720.2903,
+ "eval_samples_per_second": 7.18,
+ "eval_wer": 0.4181680307507256,
+ "step": 1600
+ },
+ {
+ "epoch": 8.51,
+ "learning_rate": 0.0002312977099236641,
+ "loss": 0.0867,
+ "step": 2000
+ },
+ {
+ "epoch": 8.51,
+ "eval_loss": 0.24546758830547333,
+ "eval_runtime": 728.0447,
+ "eval_samples_per_second": 7.104,
+ "eval_wer": 0.40582590277959363,
+ "step": 2000
+ },
+ {
+ "epoch": 10.21,
+ "learning_rate": 0.0002129770992366412,
+ "loss": 0.0696,
+ "step": 2400
+ },
+ {
+ "epoch": 10.21,
+ "eval_loss": 0.26230543851852417,
+ "eval_runtime": 725.2392,
+ "eval_samples_per_second": 7.131,
+ "eval_wer": 0.3916272258975499,
+ "step": 2400
+ },
+ {
+ "epoch": 11.91,
+ "learning_rate": 0.0001946564885496183,
+ "loss": 0.0637,
+ "step": 2800
+ },
+ {
+ "epoch": 11.91,
+ "eval_loss": 0.2552664577960968,
+ "eval_runtime": 729.1761,
+ "eval_samples_per_second": 7.093,
+ "eval_wer": 0.3940067463326622,
+ "step": 2800
+ },
+ {
+ "epoch": 13.62,
+ "learning_rate": 0.0001763358778625954,
+ "loss": 0.0554,
+ "step": 3200
+ },
+ {
+ "epoch": 13.62,
+ "eval_loss": 0.24944408237934113,
+ "eval_runtime": 725.5889,
+ "eval_samples_per_second": 7.128,
+ "eval_wer": 0.3822660356143608,
+ "step": 3200
+ },
+ {
+ "epoch": 15.32,
+ "learning_rate": 0.0001580152671755725,
+ "loss": 0.0477,
+ "step": 3600
+ },
+ {
+ "epoch": 15.32,
+ "eval_loss": 0.25511008501052856,
+ "eval_runtime": 714.9627,
+ "eval_samples_per_second": 7.234,
+ "eval_wer": 0.38171691551395026,
+ "step": 3600
+ },
+ {
+ "epoch": 17.02,
+ "learning_rate": 0.0001396946564885496,
+ "loss": 0.0441,
+ "step": 4000
+ },
+ {
+ "epoch": 17.02,
+ "eval_loss": 0.26531100273132324,
+ "eval_runtime": 724.9275,
+ "eval_samples_per_second": 7.135,
+ "eval_wer": 0.3808540124990194,
+ "step": 4000
+ },
+ {
+ "epoch": 18.72,
+ "learning_rate": 0.00012137404580152671,
+ "loss": 0.0414,
+ "step": 4400
+ },
+ {
+ "epoch": 18.72,
+ "eval_loss": 0.2718845307826996,
+ "eval_runtime": 724.6581,
+ "eval_samples_per_second": 7.137,
+ "eval_wer": 0.3761211202050048,
+ "step": 4400
+ },
+ {
+ "epoch": 20.43,
+ "learning_rate": 0.00010305343511450381,
+ "loss": 0.0364,
+ "step": 4800
+ },
+ {
+ "epoch": 20.43,
+ "eval_loss": 0.2734206020832062,
+ "eval_runtime": 721.5427,
+ "eval_samples_per_second": 7.168,
+ "eval_wer": 0.36911330178071805,
+ "step": 4800
+ },
+ {
+ "epoch": 22.13,
+ "learning_rate": 8.473282442748092e-05,
+ "loss": 0.0345,
+ "step": 5200
+ },
+ {
+ "epoch": 22.13,
+ "eval_loss": 0.2777670919895172,
+ "eval_runtime": 750.8687,
+ "eval_samples_per_second": 6.888,
+ "eval_wer": 0.3657924326020448,
+ "step": 5200
+ },
+ {
+ "epoch": 23.83,
+ "learning_rate": 6.641221374045802e-05,
+ "loss": 0.0304,
+ "step": 5600
+ },
+ {
+ "epoch": 23.83,
+ "eval_loss": 0.2627178430557251,
+ "eval_runtime": 737.7059,
+ "eval_samples_per_second": 7.011,
+ "eval_wer": 0.36197474047538114,
+ "step": 5600
+ },
+ {
+ "epoch": 25.53,
+ "learning_rate": 4.809160305343511e-05,
+ "loss": 0.028,
+ "step": 6000
+ },
+ {
+ "epoch": 25.53,
+ "eval_loss": 0.2723003923892975,
+ "eval_runtime": 739.1125,
+ "eval_samples_per_second": 6.998,
+ "eval_wer": 0.3572156996051565,
+ "step": 6000
+ },
+ {
+ "epoch": 27.23,
+ "learning_rate": 2.977099236641221e-05,
+ "loss": 0.0257,
+ "step": 6400
+ },
+ {
+ "epoch": 27.23,
+ "eval_loss": 0.2800405025482178,
+ "eval_runtime": 740.691,
+ "eval_samples_per_second": 6.983,
+ "eval_wer": 0.353633344664383,
+ "step": 6400
+ },
+ {
+ "epoch": 28.94,
+ "learning_rate": 1.1450381679389312e-05,
+ "loss": 0.0257,
+ "step": 6800
+ },
+ {
+ "epoch": 28.94,
+ "eval_loss": 0.27684447169303894,
+ "eval_runtime": 742.5074,
+ "eval_samples_per_second": 6.966,
+ "eval_wer": 0.35324111602123265,
+ "step": 6800
+ }
+ ],
+ "max_steps": 7050,
+ "num_train_epochs": 30,
+ "total_flos": 3.4777475593836667e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
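The log above records metrics every 400 steps over roughly 29 of 30 planned epochs: eval WER falls from ~1.00 at step 400 to ~0.353 at step 6800, while eval loss settles around 0.25–0.28. A small sketch that reprints that trajectory from the file, assuming trainer_state.json is in the working directory:

```python
# Sketch: print the eval loss / WER curve recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_wer" in entry:  # keep only the evaluation entries
        print(f"step {entry['step']:>5}  eval_loss {entry['eval_loss']:.3f}  eval_wer {entry['eval_wer']:.3f}")
```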
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b94ada224cd956051c3a9d3dc97df0e3932dbb6f2377daa0bc80f35798c77e01
+ size 2287
vocab.json ADDED
@@ -0,0 +1 @@
+ {"q": 0, "b": 1, "r": 2, "h": 4, "j": 5, "f": 6, "a": 7, "u": 8, "x": 9, "p": 10, "w": 11, "z": 12, "k": 13, "e": 14, "l": 15, "y": 16, "t": 17, "g": 18, "c": 19, "d": 20, "o": 21, "n": 22, "ñ": 23, "v": 24, "m": 25, "s": 26, "i": 27, "|": 3, "[UNK]": 28, "[PAD]": 29}