Hanzalwi committed on
Commit d4c7f52
1 Parent(s): d958fb3

Training in progress, step 3600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5bafbc247ce2c8953c6683f90d87f8bd8ba37cf8ae0a314ca9b696b213130d6e
+oid sha256:385b9b5f02bffd7fc21b5b6099caf720ae559e7d2c40f610dadf97aa98914ee6
 size 19669752
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89cd32755e389b4400e83f3821ea5ace462c4e5fb0ddcdffabfe08ffe0d174ff
+oid sha256:d38e59bc166d16c5098bd04f0c12f6dd5bc1d2edb5232c6c8d36feee70c1003a
 size 39356997
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c7d9372711d9e99428275dc2ac133f7557aed0f81e715109fa05ba4f94426d2d
+oid sha256:d7212374cd916676c44e1c9a2f0614a7d23ee62995154c5af0a7ae8272789312
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:31f466e63f2d702a9646f61d3cb0499d7a443ca833cfea51694a53eaa24cfd01
+oid sha256:43e3cf8d56a3f083d00cc85544d76ada2f884a1018c8752332d96f2799911117
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8955456614494324,
-  "best_model_checkpoint": "./outputs/checkpoint-3500",
-  "epoch": 4.666666666666667,
+  "best_metric": 0.8900671601295471,
+  "best_model_checkpoint": "./outputs/checkpoint-3600",
+  "epoch": 4.8,
   "eval_steps": 100,
-  "global_step": 3500,
+  "global_step": 3600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -497,6 +497,20 @@
       "eval_samples_per_second": 2.424,
       "eval_steps_per_second": 0.304,
       "step": 3500
+    },
+    {
+      "epoch": 4.8,
+      "learning_rate": 0.0002,
+      "loss": 0.6882,
+      "step": 3600
+    },
+    {
+      "epoch": 4.8,
+      "eval_loss": 0.8900671601295471,
+      "eval_runtime": 776.8193,
+      "eval_samples_per_second": 2.484,
+      "eval_steps_per_second": 0.312,
+      "step": 3600
     }
   ],
   "logging_steps": 100,
@@ -504,7 +518,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 100,
-  "total_flos": 9.06455971769303e+17,
+  "total_flos": 9.323259516653568e+17,
   "trial_name": null,
   "trial_params": null
 }