MohamedAhmedAE committed on
Commit 1f8f7aa
1 Parent(s): b26ddd4

Training in progress, step 6200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df13651e277a0c57568ab8ad1700485f28f93fce313b51d305bf49236a22d848
+oid sha256:814f9725a2fe2017d81c7865800e3fc5aaa4d89e46fab595b01bbf4f7f20db30
 size 167832240
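
The checkpoint binaries in this commit are stored through Git LFS, so each diff only touches the pointer file: the `oid sha256:` line is the digest of the new blob and `size` is its byte count. A minimal sketch, assuming the LFS objects have been pulled locally, for confirming that a downloaded file matches its pointer (the path is simply the file named in the diff above):

```python
import hashlib
import os

def lfs_sha256(path, chunk_size=1 << 20):
    """Stream a file and return the sha256 hex digest that Git LFS records in its pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "last-checkpoint/adapter_model.safetensors"  # file named in the pointer above
print(lfs_sha256(path))       # expected: 814f9725a2fe2017d81c7865800e3fc5aaa4d89e46fab595b01bbf4f7f20db30
print(os.path.getsize(path))  # expected: 167832240
```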
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f6d8560b09a8acbd520a643f2c903a1b4abe534eed07f393b76e7e3b4383130
+oid sha256:bfd0fc04c6dd797da3f4131745048e2e54e143452e73d060a7d4b95cdcc46ab1
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:387d439f709c593f39d19f3e24beeef5450e32e99e883dfbed8c69aedc09f310
+oid sha256:d6abe2d849c4fd1f2d539f784dcb3eeec5c7c22dff6a560aa1f6ccd308e25b9e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db17511cf40c553b235e796d2decd760897da6988ddf690cb68bdac8e17c1364
+oid sha256:76d8049c15ee2a00e055cffd8620c280fe7aa42e36ed292a62ff445777eff25a
 size 1064
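
Alongside the adapter weights, `optimizer.pt`, `scheduler.pt`, and `rng_state.pth` carry the optimizer state, learning-rate-scheduler state, and RNG snapshot that the Hugging Face `Trainer` writes with each checkpoint so a resumed run can continue deterministically. A minimal inspection sketch, assuming the checkpoint directory has been pulled locally (the printed structure is typical of PyTorch state dicts, not guaranteed for this exact run):

```python
import torch

# Load on CPU so inspection does not need a GPU; weights_only=False is needed on
# recent PyTorch versions because these checkpoints contain pickled Python objects.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(list(optimizer_state.keys()))  # typically ["state", "param_groups"]
print(scheduler_state)               # small dict of scheduler counters (hence the 1064-byte file)
print(list(rng_state.keys()))        # per-library RNG states saved by the Trainer
```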
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.004461978366841551,
+  "epoch": 0.004610710979069603,
   "eval_steps": 2000,
-  "global_step": 6000,
+  "global_step": 6200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -217,6 +217,13 @@
       "learning_rate": 1.999996084464646e-05,
       "loss": 1.6252,
       "step": 6000
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 1.8496161699295044,
+      "learning_rate": 1.9999958185831053e-05,
+      "loss": 1.5803,
+      "step": 6200
     }
   ],
   "logging_steps": 200,
@@ -224,7 +231,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 7.88654619022295e+16,
+  "total_flos": 8.142964605100032e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null