MohamedAhmedAE committed
Commit 7835177
1 Parent(s): 2496567

Training in progress, step 8400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:783309b92bf904a4987195d65f9c3e49a0a7df8a83abb12055e5dba735e35ba3
+oid sha256:68399e5c313a915751634cf5ffae9ace9416e38938d0103a268d1d11dce06274
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c2c0aba8b4efcec01add15ebdb21629b6fb824568690d50ff04fee5ba95bce6f
+oid sha256:4548ac75442441bb1d5da3687ffb1363df171933d0e50d8f215a23de567a8a6b
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a6e69560bad23a6fb4e8cc5cd668902a0c825d77164f5297a8878eeef71f6dbf
+oid sha256:d48374e1e6b1447bbeee1d70f64941ccb28e15e76b5206f65f89b25b037eb922
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:612d436028557394444e8286b0ca62afad5efea9ac857621a2bc4c96f8a94c3b
+oid sha256:877cbfb7de479f62248634d105d7b3cf3fb2397e07f0e8b78265a455155762d7
 size 1064
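
The checkpoint binaries above are tracked with Git LFS, so each diff only touches the pointer file (LFS spec version, sha256 oid of the blob, and byte size). A minimal sketch of verifying a fetched blob against the new pointer values from this commit; the local path is an assumption about where the checkpoint is checked out.

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex sha256 digest."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Assumed local path to one of the LFS-tracked checkpoint files.
blob = Path("last-checkpoint/adapter_model.safetensors")

# Values taken from the updated pointer file in this commit.
expected_oid = "68399e5c313a915751634cf5ffae9ace9416e38938d0103a268d1d11dce06274"
expected_size = 167832240

assert blob.stat().st_size == expected_size, "size mismatch with LFS pointer"
assert sha256_of(blob) == expected_oid, "sha256 mismatch with LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```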
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.00609803710135012,
+  "epoch": 0.006246769713578172,
   "eval_steps": 2000,
-  "global_step": 8200,
+  "global_step": 8400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -294,6 +294,13 @@
       "learning_rate": 1.999992679444808e-05,
       "loss": 1.5118,
       "step": 8200
+    },
+    {
+      "epoch": 0.01,
+      "grad_norm": 4.383536338806152,
+      "learning_rate": 1.9999923174987494e-05,
+      "loss": 1.5533,
+      "step": 8400
     }
   ],
   "logging_steps": 200,
@@ -301,7 +308,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 1.0738682585918669e+17,
+  "total_flos": 1.0986977552390554e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null