Femboyuwu2000 committed
Commit 30de8dc
Parent: eb6188a

Training in progress, step 3660, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d54e67b174db8aa6d78fd7e5ad7cc14e255ae9e2ac4b07617e63a3c6e1f44fd1
+oid sha256:fbcfaec3e348c7d2277c6b26e72513d8f233c50f6153d77f79fb16607816e019
 size 13982248
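Each checkpoint binary above is stored as a git-lfs pointer, so the diff only shows the new sha256 oid and the byte size. After downloading, the blob can be checked against the pointer; the sketch below is only an illustration and assumes the checkpoint was fetched into ./last-checkpoint with git-lfs having already replaced the pointer by the real file.

```python
# Minimal verification sketch: the LFS "oid" is the SHA-256 of the blob and
# "size" is its length in bytes. The local path is an assumption.
import hashlib
import os

path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "fbcfaec3e348c7d2277c6b26e72513d8f233c50f6153d77f79fb16607816e019"
expected_size = 13982248

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize(path) == expected_size, "size does not match LFS pointer"
assert digest == expected_oid, "sha256 does not match LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```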
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a538e98a56080dc8b839b137de6d2baa98652765c46f5d7d5638ebbff1fc24e
+oid sha256:8e9a7c0408bb21c6add531c8ecf332ee65e8da095897c11526a8a71159a612bd
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:77ef44b95127212e765792a1cf83babb6419dbbbc5242f9919c1607c1deb549f
+oid sha256:fbdcee77be9827de8d12c3593b0e445ae8e58c98fda9d45171de520fb50310c0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c6436b03258a74e4e347dfaffcfa88289f9f39a08c2a52b4713027be6b232347
+oid sha256:75387a128862b4898057b986470075974f53b66436a53a12635d2da9900d5d04
 size 1064
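optimizer.pt, scheduler.pt and rng_state.pth are torch-serialized state objects written by the Trainer at this step, and they can be inspected once downloaded. A small sketch under the same ./last-checkpoint assumption (rng_state.pth additionally holds Python/NumPy RNG state, so a plain torch.load of it may need weights_only=False on recent torch versions):

```python
# Inspection sketch only: prints the top-level structure of the saved states.
import torch

opt = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu")

# A torch optimizer state_dict holds per-parameter "state" plus "param_groups"
# (learning rate, betas, weight decay, ...).
print(list(opt.keys()))
print(opt["param_groups"][0].get("lr"))

# The scheduler state_dict records where the LR schedule currently stands.
print(sched)
```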
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2912,
+  "epoch": 0.2928,
   "eval_steps": 500,
-  "global_step": 3640,
+  "global_step": 3660,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1281,6 +1281,13 @@
       "learning_rate": 2.8218786465788984e-05,
       "loss": 3.6001,
       "step": 3640
+    },
+    {
+      "epoch": 0.29,
+      "grad_norm": 27.951440811157227,
+      "learning_rate": 2.8195755409920584e-05,
+      "loss": 3.4387,
+      "step": 3660
     }
   ],
   "logging_steps": 20,
@@ -1288,7 +1295,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8607877788991488.0,
+  "total_flos": 8653740454772736.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null