Silemo committed
Commit d6db005
1 Parent(s): 7c043ae

Training in progress, step 1000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:363a18dff03938cbab3ea0517e5fd8baacab97d4647e9d18d67491af3678daa6
+oid sha256:e8be1c95dfd279bbe6f6a75bce4f3ce18633cb0a7088267acd5b4658adc3b6d4
 size 966995080
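
The safetensors entry is a Git LFS pointer: only the object hash changes between checkpoints, while the payload stays at 966995080 bytes (~967 MB of weights). A minimal sketch of loading the materialized file, assuming this repository holds a Whisper fine-tune (as the ./whisper-it/checkpoint-300 path in trainer_state.json suggests), that git lfs pull has fetched the real payload, and that the checkpoint directory still contains the unchanged config files from_pretrained expects; the local path last-checkpoint/ is illustrative:

from transformers import WhisperForConditionalGeneration

# Illustrative local path to this commit's checkpoint directory;
# from_pretrained reads model.safetensors together with the saved config.json.
model = WhisperForConditionalGeneration.from_pretrained("last-checkpoint")

If only the raw tensors are needed, safetensors.torch.load_file("last-checkpoint/model.safetensors") returns the state dict without touching the config.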
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec51eb4084dc7356ccd5e4f6e60d6eb446a9f6b453cb22d3a3f27e5a78818d65
+oid sha256:bb16a6cac42140762a41436bca3742b060da4d0ceea879b4daba9099a1739f7d
 size 1925064044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:140766ee026fe5edfef0f038f98f0479f8a9d10cfc1f7fe43d81468743578807
+oid sha256:41ee96486c1fbe8be3afbbb5ed39c71e24b010f6e515774d2f58d902dc02badc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5923d5ad322acc38eebeb7826b39a3fb9c19734502e2060fc78e9bf4e91b73b
+oid sha256:43e338dcb83a79b3100f74899c743f5e131776c52c884832b4fc10b9cf5c3c8d
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 70.1955074875208,
   "best_model_checkpoint": "./whisper-it/checkpoint-300",
-  "epoch": 1.33587786259542,
+  "epoch": 1.9083969465648853,
   "eval_steps": 100,
-  "global_step": 700,
+  "global_step": 1000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -238,6 +238,105 @@
       "eval_steps_per_second": 0.112,
       "eval_wer": 90.3563505268996,
       "step": 700
+    },
+    {
+      "epoch": 1.38,
+      "learning_rate": 9.374285714285715e-06,
+      "loss": 0.1626,
+      "step": 725
+    },
+    {
+      "epoch": 1.43,
+      "learning_rate": 9.302857142857144e-06,
+      "loss": 0.1674,
+      "step": 750
+    },
+    {
+      "epoch": 1.48,
+      "learning_rate": 9.231428571428573e-06,
+      "loss": 0.1717,
+      "step": 775
+    },
+    {
+      "epoch": 1.53,
+      "learning_rate": 9.16e-06,
+      "loss": 0.1607,
+      "step": 800
+    },
+    {
+      "epoch": 1.53,
+      "eval_loss": 0.3789268732070923,
+      "eval_runtime": 1834.2673,
+      "eval_samples_per_second": 0.818,
+      "eval_steps_per_second": 0.102,
+      "eval_wer": 134.98336106489182,
+      "step": 800
+    },
+    {
+      "epoch": 1.57,
+      "learning_rate": 9.08857142857143e-06,
+      "loss": 0.1636,
+      "step": 825
+    },
+    {
+      "epoch": 1.62,
+      "learning_rate": 9.017142857142858e-06,
+      "loss": 0.1625,
+      "step": 850
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 8.945714285714286e-06,
+      "loss": 0.1762,
+      "step": 875
+    },
+    {
+      "epoch": 1.72,
+      "learning_rate": 8.874285714285715e-06,
+      "loss": 0.154,
+      "step": 900
+    },
+    {
+      "epoch": 1.72,
+      "eval_loss": 0.3783314824104309,
+      "eval_runtime": 1787.7259,
+      "eval_samples_per_second": 0.839,
+      "eval_steps_per_second": 0.105,
+      "eval_wer": 99.0501941209096,
+      "step": 900
+    },
+    {
+      "epoch": 1.77,
+      "learning_rate": 8.802857142857144e-06,
+      "loss": 0.1703,
+      "step": 925
+    },
+    {
+      "epoch": 1.81,
+      "learning_rate": 8.731428571428571e-06,
+      "loss": 0.1593,
+      "step": 950
+    },
+    {
+      "epoch": 1.86,
+      "learning_rate": 8.66e-06,
+      "loss": 0.1648,
+      "step": 975
+    },
+    {
+      "epoch": 1.91,
+      "learning_rate": 8.588571428571429e-06,
+      "loss": 0.1562,
+      "step": 1000
+    },
+    {
+      "epoch": 1.91,
+      "eval_loss": 0.36686423420906067,
+      "eval_runtime": 1815.5283,
+      "eval_samples_per_second": 0.826,
+      "eval_steps_per_second": 0.104,
+      "eval_wer": 98.31530782029951,
+      "step": 1000
     }
   ],
   "logging_steps": 25,
@@ -245,7 +344,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 8,
   "save_steps": 100,
-  "total_flos": 6.46229286936576e+18,
+  "total_flos": 9.23271271243776e+18,
   "trial_name": null,
   "trial_params": null
 }
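
The new log entries cover training from step 700 to step 1000: training loss holds near 0.16, while eval_wer jumps to about 135 at step 800 before settling around 98-99, still well above the best_metric of 70.20 recorded at checkpoint-300, which is why best_model_checkpoint is unchanged. A minimal sketch of extracting that evaluation curve from the file, assuming the entries shown above sit under the Trainer's usual log_history key (the key name itself lies outside the diffed hunks):

import json

# Path to the file diffed above, relative to the repository root.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the log entries that carry an eval_wer value.
for entry in state.get("log_history", []):
    if "eval_wer" in entry:
        print(entry["step"], entry["eval_wer"])

On the values in this commit the loop would print 90.36 at step 700, 134.98 at step 800, 99.05 at step 900 and 98.32 at step 1000, plus the earlier evaluations kept outside these hunks.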
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8639ff321d8732686fcd47480f025401063d95f80a6017764993068cc24ca794
+oid sha256:26fb5b39845f83ef97933929d4301eb10fbbba57913dc6054b50bd05a1ce5a9f
 size 4792
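
Taken together, the updated files are the state the Hugging Face Trainer saves so a run can be picked up where step 1000 left off: the weights in model.safetensors, optimizer and scheduler state, RNG state, the serialized training arguments, and the bookkeeping in trainer_state.json. A minimal sketch of resuming, assuming a Seq2SeqTrainer named trainer has already been rebuilt with the same model, arguments and datasets as the original run:

# Restores weights, optimizer/scheduler state, RNG state and trainer_state.json
# from the checkpoint directory before continuing toward num_train_epochs = 8.
trainer.train(resume_from_checkpoint="last-checkpoint")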