MohamedAhmedAE committed
Commit 9c1858d
1 Parent(s): a4776e2

Training in progress, step 6600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d6787e0eaa9a56e0fa6384e31cadb399d551788e841dc96c421a54c989efc069
+oid sha256:23ecbd17698411abba08f800223d4f97c20ce6694fc5497eae762462f06c3ee3
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93664eaaa2494807cb13b7289c551278a5569b899724bfce116b35cb065a15b1
+oid sha256:8e240a4ccbdf1ba5b7269029cb4c7fbd098170a36530fb8a7924ca7a51629862
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06ae7e7d534fe7c098495e70ee979302efce6ee6dcf9f99dc5c535716646d42a
+oid sha256:1fcac68827b9f46f3ac71f10fc39a641a04d0ffa77754feb125895a9297c3035
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61184dac90fcc9cbb0e85bbabd16a1b7709c95fe49eceb7bec3bd299bccf398e
+oid sha256:6a5a6b913ef84b8f56bbbc750706d31053f1c2d4afa8b1e983e030aad31a2507
 size 1064
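
Each of the files above is stored as a Git LFS pointer: the repository tracks only the `oid sha256:` digest and the `size` in bytes, while the binary itself lives in LFS storage, so this commit changes nothing but those digests. As a minimal sketch (not part of the commit, with hypothetical local paths), a downloaded blob can be checked against its pointer like this:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in its Git LFS pointer."""
    expected_oid, expected_size = None, None
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            if line.startswith("oid sha256:"):
                expected_oid = line.split("sha256:", 1)[1].strip()
            elif line.startswith("size "):
                expected_size = int(line.split()[1])

    # Hash the blob in chunks so large checkpoint files need not fit in memory.
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    return (digest.hexdigest() == expected_oid
            and os.path.getsize(blob_path) == expected_size)

# Hypothetical paths: the pointer as kept in git (e.g. cloned with GIT_LFS_SKIP_SMUDGE=1)
# and the fully downloaded adapter weights.
print(verify_lfs_pointer("pointers/adapter_model.safetensors",
                         "last-checkpoint/adapter_model.safetensors"))
```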
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.004759443591297655,
+  "epoch": 0.004908176203525707,
   "eval_steps": 2000,
-  "global_step": 6400,
+  "global_step": 6600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -231,6 +231,13 @@
       "learning_rate": 1.999995543968414e-05,
       "loss": 1.6274,
       "step": 6400
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 1.9622883796691895,
+      "learning_rate": 1.9999952606205736e-05,
+      "loss": 1.6222,
+      "step": 6600
     }
   ],
   "logging_steps": 200,
@@ -238,7 +245,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 8.403014572720128e+16,
+  "total_flos": 8.670264252137472e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null