KevinKibe committed on
Commit b76afd0
1 parent: fc13d1b

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f6df6d40dfb035fcec155c3cfa4fabc79c7f5264f6601ffac386e91b24fc0d5
+oid sha256:d5b3adfd47dbd764029d7333027ab658643a9aa6609e86743308674c03424805
 size 2377150
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e9c3128a5e93efdd5020fb25b025cf9d809e5044e8b33447649b63faa704025
+oid sha256:6f75e38ca1519f9662eca4cf40a8e0a6dd45a7e93ae3ee63376a51f2b13d262b
 size 826490
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0a4470d1ea49e81453eb7f521785faae83ec370b2823751ff20204b46aff8639
+oid sha256:848ad77b64ea2b3d439c394c12358dde6be4207e29fcfe6f6e91fa8b0c4d39ac
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0f38a2c63d0ec9849191dc1faf9440cf5827dcc6785c8b62720c0320435ae75
+oid sha256:2d51b7cf9db4ba02d1e4e9ed40ce73e24d1e5b75b6383f2c2d1f5ddb927734ef
 size 1064
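Each of the binary checkpoint files above is stored as a Git LFS pointer: only the sha256 oid changes between commits while the size stays the same. As a small, hedged illustration (assuming the repository has been checked out with the real file contents rather than the pointers, and using last-checkpoint/adapter_model.bin as an example path), the oid can be verified by hashing the file locally, since the LFS oid is the SHA-256 of the object's contents:

import hashlib

# Assumed local path to one of the checkpoint files from this commit.
path = "last-checkpoint/adapter_model.bin"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so large checkpoints need not fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Should print the oid recorded in the pointer, e.g. d5b3adfd47db...
print(sha256.hexdigest())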
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 218.18181818181816,
-  "best_model_checkpoint": "../openai/whisper-tiny-finetuned/checkpoint-25",
-  "epoch": 24.02,
+  "best_metric": 163.63636363636365,
+  "best_model_checkpoint": "../openai/whisper-tiny-finetuned/checkpoint-50",
+  "epoch": 49.02,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -23,6 +23,22 @@
       "eval_steps_per_second": 0.107,
       "eval_wer": 218.18181818181816,
       "step": 25
+    },
+    {
+      "epoch": 49.02,
+      "grad_norm": 1.4730064868927002,
+      "learning_rate": 0.0,
+      "loss": 0.9033,
+      "step": 50
+    },
+    {
+      "epoch": 49.02,
+      "eval_loss": 2.1805198192596436,
+      "eval_runtime": 8.1444,
+      "eval_samples_per_second": 0.491,
+      "eval_steps_per_second": 0.123,
+      "eval_wer": 163.63636363636365,
+      "step": 50
     }
   ],
   "logging_steps": 25,
@@ -30,7 +46,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 9223372036854775807,
   "save_steps": 25,
-  "total_flos": 8913881088000000.0,
+  "total_flos": 1.7827762176e+16,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null