MohamedAhmedAE committed on
Commit c54cb9b
1 Parent(s): 43e292e

Training in progress, step 7600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23743e9690674a1d534f69568011018a38cff6734e126949d34da8a6d936cbe8
+oid sha256:f68c43c7f004c1eadd91b9fe7a30fc2b0fd69b4f58b1bc37e1b8caf12c6810ef
 size 167832240
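These entries are Git LFS pointer diffs: only the SHA-256 digest (oid) and byte size of the tracked file change, not inline content. A minimal verification sketch in Python, assuming the actual adapter file has already been downloaded locally (the path below is illustrative):

```python
import hashlib

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a local file against the oid/size recorded in a Git LFS pointer."""
    sha, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Values copied from the new pointer above.
print(verify_lfs_pointer(
    "adapter_model.safetensors",
    "f68c43c7f004c1eadd91b9fe7a30fc2b0fd69b4f58b1bc37e1b8caf12c6810ef",
    167832240,
))
```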
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2b7b03e4ec1e08ee271094d3bc4edd63ca79ae6d7a5da9e2391644347d04f7a
+oid sha256:c2e2d7e319df6b910d8660f90fe9f5f432aa3e2fcf0fc9f644ada3faf7f04901
 size 84581014
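optimizer.pt holds the serialized optimizer state dict (e.g. AdamW moment estimates and param_groups) so a resumed run keeps its optimizer momentum. A quick inspection sketch, assuming the checkpoint directory has been pulled locally:

```python
import torch

# A standard PyTorch optimizer state_dict: typically the keys 'state' and 'param_groups'.
opt_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
print(list(opt_state.keys()))
print("tracked parameter tensors:", len(opt_state["state"]))
```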
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:86d47e244e90ce0cebf2d802c7f97cc46f6230bc0418ded16d90b0ab0fcbef08
+oid sha256:d10e311d00a264a46fa5f0eed688f7217fcace7c41779a2ffd66f55f87f67920
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67201e35f3439e7b0671d46d9e3530d28a0b8d3cd07e4d2782d31a603d7c7dac
+oid sha256:18fe99cd7e53dc1def962f8fb0d7ad9ae866de71ef7bffa4b49eb8c6cbd19c21
 size 1064
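adapter_model.safetensors follows the PEFT adapter naming, so the checkpoint appears to store only adapter weights (~168 MB) plus the optimizer, LR-scheduler, and RNG state above rather than full model weights. A hedged loading sketch with the peft library; the base model identifier is a placeholder, since the base model is not recorded in this diff:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL = "base-model-id"  # placeholder: not stated in this commit

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
# Attach the adapter weights saved in the checkpoint directory.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()
```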
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.005503106652437913,
+  "epoch": 0.005651839264665965,
   "eval_steps": 2000,
-  "global_step": 7400,
+  "global_step": 7600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -266,6 +266,13 @@
       "learning_rate": 1.999994039897775e-05,
       "loss": 1.5864,
       "step": 7400
+    },
+    {
+      "epoch": 0.01,
+      "grad_norm": 2.2379679679870605,
+      "learning_rate": 1.9999937128842296e-05,
+      "loss": 1.6411,
+      "step": 7600
     }
   ],
   "logging_steps": 200,
@@ -273,7 +280,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 9.719488667027866e+16,
+  "total_flos": 9.98269473472512e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null