Femboyuwu2000 committed
Commit 5b52b3c
1 Parent(s): 7afc73b

Training in progress, step 7240, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:04106cea74c9b7217608ed5335434aec139928b980a1d98f13c1d65ad9a2757e
+oid sha256:f9f04251e463450f35446dc22e021f6c9044fcdc00275a99ce69fc6afd696a20
 size 13982248
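
The file above is a Git LFS pointer: only the sha256 oid and the size are versioned, while the ~14 MB adapter weights live in LFS storage. As a minimal, hypothetical sketch of loading this adapter checkpoint with PEFT (the base model is not named in this commit, so BASE_MODEL_ID below is a placeholder):

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder: the base model this LoRA adapter was trained on is not named in this commit.
BASE_MODEL_ID = "your-base-model-id"
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)
# Reads last-checkpoint/adapter_model.safetensors (the 13,982,248-byte file referenced above).
model = PeftModel.from_pretrained(base, "last-checkpoint")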
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:44c17ba484ea8b4f6a5dc4fb01470e7b5de3b2280c1d5dfa43045e816b3035ab
+oid sha256:5419bb4aaab7e0dd3ffd695a7210671fdaf57f5ee5535e99d1328b186ac853c3
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:515ac92b399b536a4a0990d851348426453ad5a34a8114a85806dc0f2120c04c
+oid sha256:13dea0908d6d2c4a230edba574b9ef1b909b1f3e549fde10d325fea8eadf947f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:612c3aa776f0b1615bfed5f7770ad478e4281d699f7ba3dca66036c0a3ba855a
+oid sha256:957eed2d1d64bc3d33fd608918e084dfadc6685aa1ba1a370b937a7f5aa867a5
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5776,
+  "epoch": 0.5792,
   "eval_steps": 500,
-  "global_step": 7220,
+  "global_step": 7240,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2534,6 +2534,13 @@
       "learning_rate": 2.217516409129699e-05,
       "loss": 3.5408,
       "step": 7220
+    },
+    {
+      "epoch": 0.58,
+      "grad_norm": 42.050819396972656,
+      "learning_rate": 2.2132463751155815e-05,
+      "loss": 3.4422,
+      "step": 7240
     }
   ],
   "logging_steps": 20,
@@ -2541,7 +2548,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.7055072218185728e+16,
+  "total_flos": 1.7108381826121728e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null