SpideyDLK committed (verified)
Commit 8bd16b1 · 1 Parent(s): ebed109

Training in progress, step 19600, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6352187cf385397f40eed62cf701dec875c7e3375118d91430a850443ab1f27d
+oid sha256:61f07e30ba6c3e4d46423b96f0cbcbd82106b6bb256c1c12e811925d96d026b5
 size 1262135480
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b0cfa6e921875d63852f96db976fdc599ce3aea4b10926ddb0746660a5a59dc4
+oid sha256:67dae5c921c1d0e3ec0ef2ca745ddb83ae850f785c418cb799e8d39e9977effb
 size 2490815798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a7a0041b53de4a505be9cc74a61653f340f65bd90c8c4022f66bf9eaf4ab421
+oid sha256:aed632af97046c1419a3b3bfb31bf5b26a7fe13c4e9fee7877e803a6d9b9ab78
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46e26e61dfa7a5f81fbf493f586e17fb54189c196dc287ec04a698210453fdf9
+oid sha256:ed90542fa85abfeb1fc1d8ba1917d59ae062284951ce0a75c17787d5046ce253
 size 1064
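
The four files above are Git LFS pointers: only the sha256 oid changes from one checkpoint save to the next, while the byte sizes stay fixed. A minimal sketch for verifying that a locally downloaded file matches the new pointer, assuming the repo has been cloned with the LFS objects fetched (e.g. via `git lfs pull`) so that `last-checkpoint/model.safetensors` is the real binary rather than the pointer text:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream-hash a large checkpoint file without loading it all into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the new model.safetensors pointer in this commit
expected = "61f07e30ba6c3e4d46423b96f0cbcbd82106b6bb256c1c12e811925d96d026b5"
actual = sha256_of("last-checkpoint/model.safetensors")  # assumed local path
print("OK" if actual == expected else f"mismatch: {actual}")
```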
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 13.025780189959294,
+  "epoch": 13.297150610583447,
   "eval_steps": 400,
-  "global_step": 19200,
+  "global_step": 19600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -775,6 +775,22 @@
       "eval_steps_per_second": 0.831,
       "eval_wer": 0.057695270374364505,
       "step": 19200
+    },
+    {
+      "epoch": 13.3,
+      "grad_norm": 0.4078648090362549,
+      "learning_rate": 0.00016893870082342174,
+      "loss": 0.0779,
+      "step": 19600
+    },
+    {
+      "epoch": 13.3,
+      "eval_loss": 0.03577824681997299,
+      "eval_runtime": 210.5433,
+      "eval_samples_per_second": 6.645,
+      "eval_steps_per_second": 0.831,
+      "eval_wer": 0.06208596518256047,
+      "step": 19600
     }
   ],
   "logging_steps": 400,
@@ -782,7 +798,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 30,
   "save_steps": 400,
-  "total_flos": 5.85537722383966e+19,
+  "total_flos": 5.978374840640582e+19,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null