Commit 359c37c by rizkyjun
Parent: a6af7fe

Training in progress, step 4100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1bf6e29139b0f24b399e976d09d4884b6235ffdaaa52ee8a0b7938baae37b428
+oid sha256:b4492da231f945853f6c83ca380b96f25222aa65080b3b146679cd0a751f8627
 size 31466288
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7a42b3115d3199d3743de0e8e6b98305c990ec30c405bfdddcdad84bc32c6e1
+oid sha256:59596cd46764af0061bceb411c43f6515caf05658f5e0c0563785422ea1da3b2
 size 62950917
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a6367fbbdff331ec434b7f226419eafab1e9622cbf8e5630817798ff44dfdb8a
+oid sha256:cab967de45c663aa30264993bcda98120463fb94906abc05f58530bcb7e737be
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:593f43b0c83adb27a2db37a6418c2ef12a213bbc2a02f2dc881de6846a69a931
+oid sha256:f55ebe991e4838deda9ff3044672335e03697b65b3fe0753cca61998e974ee51
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.1375267505645752,
-  "best_model_checkpoint": "./outputs/checkpoint-4000",
-  "epoch": 2.9143897996357016,
+  "best_metric": 1.1210918426513672,
+  "best_model_checkpoint": "./outputs/checkpoint-4100",
+  "epoch": 2.987249544626594,
   "eval_steps": 100,
-  "global_step": 4000,
+  "global_step": 4100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -567,13 +567,27 @@
       "eval_samples_per_second": 11.275,
       "eval_steps_per_second": 1.411,
       "step": 4000
+    },
+    {
+      "epoch": 2.99,
+      "learning_rate": 0.0002,
+      "loss": 1.0693,
+      "step": 4100
+    },
+    {
+      "epoch": 2.99,
+      "eval_loss": 1.1210918426513672,
+      "eval_runtime": 556.5587,
+      "eval_samples_per_second": 11.273,
+      "eval_steps_per_second": 1.41,
+      "step": 4100
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 1.0412060628345815e+18,
+  "total_flos": 1.0672136421373379e+18,
   "trial_name": null,
   "trial_params": null
 }
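
For context, the updated trainer_state.json records the new best checkpoint at step 4100. A minimal sketch of how one might inspect those fields after checking out the repository (the local path is illustrative, not part of this commit):

```python
import json

# Illustrative path; adjust to wherever the repository is checked out.
STATE_PATH = "last-checkpoint/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Fields shown in this commit's diff.
print("best_metric:", state["best_metric"])                 # 1.1210918426513672
print("best checkpoint:", state["best_model_checkpoint"])   # ./outputs/checkpoint-4100
print("global_step:", state["global_step"], "of", state["max_steps"])

# Most recent evaluation entry appended to log_history in this update.
evals = [e for e in state["log_history"] if "eval_loss" in e]
print("latest eval_loss:", evals[-1]["eval_loss"])
```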