jun-han committed on
Commit 5bd202e · verified · 1 Parent(s): 08fa3b3

Training checkpoint

Files changed (2)
  1. README.md +8 -6
  2. trainer_state.json +330 -0
README.md CHANGED
@@ -1,22 +1,24 @@
 ---
-base_model: openai/whisper-small
 library_name: transformers
+language:
+- en
 license: apache-2.0
-metrics:
-- wer
+base_model: openai/whisper-small
 tags:
 - generated_from_trainer
+metrics:
+- wer
 model-index:
-- name: Whisper-squeezeformer-v7
+- name: Whisper-squeezeformer-N6SQU-
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# Whisper-squeezeformer-v7
+# Whisper-squeezeformer-N6SQU-
 
-This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset.
+This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the LibriSpeech dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.1579
 - Wer: 5.4340
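The updated card does not yet include a usage example. Below is a minimal inference sketch, assuming the checkpoint loads through the standard `transformers` automatic-speech-recognition pipeline; the repo id and audio path are illustrative placeholders, and if the Squeezeformer encoder ships custom modeling code, `trust_remote_code=True` would also be needed.

```python
# Minimal inference sketch (not part of this commit).
# Assumptions: the checkpoint is pipeline-compatible; the repo id and the
# audio file below are hypothetical placeholders.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="jun-han/Whisper-squeezeformer-N6SQU-",  # hypothetical repo id
    chunk_length_s=30,  # Whisper operates on 30-second windows
)

print(asr("sample.wav")["text"])  # path is illustrative
```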
trainer_state.json ADDED
@@ -0,0 +1,330 @@
+{
+  "best_metric": 5.066950699939135,
+  "best_model_checkpoint": "../Whisper-squeezeformer-v7\\checkpoint-40000",
+  "epoch": 18.0,
+  "eval_steps": 2500,
+  "global_step": 45000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 10.708171844482422,
+      "learning_rate": 9.976e-06,
+      "loss": 4.7861,
+      "step": 2500
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 3.87461519241333,
+      "eval_runtime": 644.5089,
+      "eval_samples_per_second": 4.065,
+      "eval_steps_per_second": 0.509,
+      "eval_wer": 131.59996956786367,
+      "step": 2500
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 5.609691143035889,
+      "learning_rate": 9.413176470588235e-06,
+      "loss": 2.7936,
+      "step": 5000
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.2787948250770569,
+      "eval_runtime": 654.0833,
+      "eval_samples_per_second": 4.006,
+      "eval_steps_per_second": 0.501,
+      "eval_wer": 14.139531345100426,
+      "step": 5000
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 3.4609644412994385,
+      "learning_rate": 8.82494117647059e-06,
+      "loss": 0.1896,
+      "step": 7500
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.20549724996089935,
+      "eval_runtime": 1218.2705,
+      "eval_samples_per_second": 2.151,
+      "eval_steps_per_second": 0.269,
+      "eval_wer": 10.453438831405965,
+      "step": 7500
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 3.3690125942230225,
+      "learning_rate": 8.236705882352943e-06,
+      "loss": 0.1024,
+      "step": 10000
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 0.19734220206737518,
+      "eval_runtime": 1187.8954,
+      "eval_samples_per_second": 2.206,
+      "eval_steps_per_second": 0.276,
+      "eval_wer": 8.690276932440657,
+      "step": 10000
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 2.1822853088378906,
+      "learning_rate": 7.648470588235296e-06,
+      "loss": 0.0602,
+      "step": 12500
+    },
+    {
+      "epoch": 5.0,
+      "eval_loss": 0.19494721293449402,
+      "eval_runtime": 1272.2089,
+      "eval_samples_per_second": 2.059,
+      "eval_steps_per_second": 0.258,
+      "eval_wer": 8.94704808277541,
+      "step": 12500
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 3.247063636779785,
+      "learning_rate": 7.060470588235294e-06,
+      "loss": 0.1756,
+      "step": 15000
+    },
+    {
+      "epoch": 6.0,
+      "eval_loss": 0.15839330852031708,
+      "eval_runtime": 1226.4236,
+      "eval_samples_per_second": 2.136,
+      "eval_steps_per_second": 0.267,
+      "eval_wer": 7.5034236153377964,
+      "step": 15000
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 4.580276966094971,
+      "learning_rate": 6.472235294117648e-06,
+      "loss": 0.1005,
+      "step": 17500
+    },
+    {
+      "epoch": 7.0,
+      "eval_loss": 0.1525421440601349,
+      "eval_runtime": 1332.1465,
+      "eval_samples_per_second": 1.967,
+      "eval_steps_per_second": 0.246,
+      "eval_wer": 6.704580036518563,
+      "step": 17500
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 2.37420916557312,
+      "learning_rate": 5.884235294117648e-06,
+      "loss": 0.0619,
+      "step": 20000
+    },
+    {
+      "epoch": 8.0,
+      "eval_loss": 0.15489771962165833,
+      "eval_runtime": 1391.991,
+      "eval_samples_per_second": 1.882,
+      "eval_steps_per_second": 0.236,
+      "eval_wer": 6.771150334753499,
+      "step": 20000
+    },
+    {
+      "epoch": 9.0,
+      "grad_norm": 4.263836860656738,
+      "learning_rate": 5.2962352941176475e-06,
+      "loss": 0.2214,
+      "step": 22500
+    },
+    {
+      "epoch": 9.0,
+      "eval_loss": 0.14554446935653687,
+      "eval_runtime": 1191.0225,
+      "eval_samples_per_second": 2.2,
+      "eval_steps_per_second": 0.275,
+      "eval_wer": 6.318472306755934,
+      "step": 22500
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 3.3093366622924805,
+      "learning_rate": 4.708470588235294e-06,
+      "loss": 0.1398,
+      "step": 25000
+    },
+    {
+      "epoch": 10.0,
+      "eval_loss": 0.14445646107196808,
+      "eval_runtime": 1180.1189,
+      "eval_samples_per_second": 2.22,
+      "eval_steps_per_second": 0.278,
+      "eval_wer": 6.162507608034084,
+      "step": 25000
+    },
+    {
+      "epoch": 11.0,
+      "grad_norm": 3.834306001663208,
+      "learning_rate": 4.120705882352942e-06,
+      "loss": 0.1967,
+      "step": 27500
+    },
+    {
+      "epoch": 11.0,
+      "eval_loss": 0.13017532229423523,
+      "eval_runtime": 1349.0935,
+      "eval_samples_per_second": 1.942,
+      "eval_steps_per_second": 0.243,
+      "eval_wer": 5.517726719415704,
+      "step": 27500
+    },
+    {
+      "epoch": 12.0,
+      "grad_norm": 3.707526206970215,
+      "learning_rate": 3.5327058823529413e-06,
+      "loss": 0.1329,
+      "step": 30000
+    },
+    {
+      "epoch": 12.0,
+      "eval_loss": 0.12983843684196472,
+      "eval_runtime": 1194.9126,
+      "eval_samples_per_second": 2.193,
+      "eval_steps_per_second": 0.274,
+      "eval_wer": 5.548158855751674,
+      "step": 30000
+    },
+    {
+      "epoch": 13.0,
+      "grad_norm": 3.3835012912750244,
+      "learning_rate": 2.944705882352941e-06,
+      "loss": 0.1778,
+      "step": 32500
+    },
+    {
+      "epoch": 13.0,
+      "eval_loss": 0.12274244427680969,
+      "eval_runtime": 1372.9808,
+      "eval_samples_per_second": 1.908,
+      "eval_steps_per_second": 0.239,
+      "eval_wer": 5.3237218502738894,
+      "step": 32500
+    },
+    {
+      "epoch": 14.0,
+      "grad_norm": 3.3048934936523438,
+      "learning_rate": 2.356705882352941e-06,
+      "loss": 0.1281,
+      "step": 35000
+    },
+    {
+      "epoch": 14.0,
+      "eval_loss": 0.12345948815345764,
+      "eval_runtime": 1191.4459,
+      "eval_samples_per_second": 2.199,
+      "eval_steps_per_second": 0.275,
+      "eval_wer": 5.179169202678028,
+      "step": 35000
+    },
+    {
+      "epoch": 15.0,
+      "grad_norm": 7.545591831207275,
+      "learning_rate": 1.7689411764705882e-06,
+      "loss": 0.3553,
+      "step": 37500
+    },
+    {
+      "epoch": 15.0,
+      "eval_loss": 0.12376432865858078,
+      "eval_runtime": 1257.7405,
+      "eval_samples_per_second": 2.083,
+      "eval_steps_per_second": 0.261,
+      "eval_wer": 5.236229458307974,
+      "step": 37500
+    },
+    {
+      "epoch": 16.0,
+      "grad_norm": 6.769348621368408,
+      "learning_rate": 1.1814117647058825e-06,
+      "loss": 0.2678,
+      "step": 40000
+    },
+    {
+      "epoch": 16.0,
+      "eval_loss": 0.121117502450943,
+      "eval_runtime": 1262.3643,
+      "eval_samples_per_second": 2.075,
+      "eval_steps_per_second": 0.26,
+      "eval_wer": 5.066950699939135,
+      "step": 40000
+    },
+    {
+      "epoch": 17.0,
+      "grad_norm": 15.616162300109863,
+      "learning_rate": 5.945882352941177e-07,
+      "loss": 0.8916,
+      "step": 42500
+    },
+    {
+      "epoch": 17.0,
+      "eval_loss": 0.14307822287082672,
+      "eval_runtime": 1570.3407,
+      "eval_samples_per_second": 1.668,
+      "eval_steps_per_second": 0.209,
+      "eval_wer": 5.3617620206938525,
+      "step": 42500
+    },
+    {
+      "epoch": 18.0,
+      "grad_norm": 9.218775749206543,
+      "learning_rate": 6.3529411764705896e-09,
+      "loss": 0.8058,
+      "step": 45000
+    },
+    {
+      "epoch": 18.0,
+      "eval_loss": 0.15793535113334656,
+      "eval_runtime": 1362.4738,
+      "eval_samples_per_second": 1.923,
+      "eval_steps_per_second": 0.241,
+      "eval_wer": 5.434038344491783,
+      "step": 45000
+    },
+    {
+      "epoch": 18.0,
+      "step": 45000,
+      "total_flos": 3.1197915376459776e+20,
+      "train_loss": 0.09429476996527777,
+      "train_runtime": 46691.7188,
+      "train_samples_per_second": 19.275,
+      "train_steps_per_second": 0.964
+    }
+  ],
+  "logging_steps": 2500,
+  "max_steps": 45000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 18,
+  "save_steps": 2500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 3.1197915376459776e+20,
+  "train_batch_size": 20,
+  "trial_name": null,
+  "trial_params": null
+}
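The `log_history` above alternates one training entry (carrying `loss` and `learning_rate`) and one evaluation entry (carrying `eval_loss` and `eval_wer`) per epoch, logged every 2500 steps, followed by a final training summary. A small plain-Python sketch for reading the evaluation curve back out of this file is shown below; it only assumes `trainer_state.json` has been downloaded locally.

```python
# Sketch: print the evaluation curve stored in trainer_state.json.
# Assumes the file sits next to this script.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the ones carrying "eval_wer"; training and
# summary entries are skipped.
for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f"epoch {entry['epoch']:4.1f}  step {entry['step']:>5}  "
              f"eval_loss {entry['eval_loss']:.4f}  wer {entry['eval_wer']:.2f}")

print("best WER:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])
```

Note that `best_metric` (WER 5.07 at step 40000) points at `checkpoint-40000`, while the step-45000 weights saved by this commit correspond to the WER of 5.43 reported in the README.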