MohamedAhmedAE committed
Commit 9b3e810
1 Parent(s): 24f09a4

Training in progress, step 1600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:de0a235f3533624d434eb2d4d1fc3cb99011c3c8700866ddf368ac7140d739a8
+oid sha256:e031d128a56e1f2c0d13eb1bdf24b11e3161a5c84ee640ffe8148c5cde2e7779
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8381471a40e6579f72ce5916fb91b4c9d44dc18b662e96d8ec470ca08ac11f3f
+oid sha256:68b7b1fbf5329eb8a91ba2dd7567fcfc9167143f8b5b1e8a308611118d62b893
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e60fc8f32eff402dbfdf1f21a411fe615eb6cdc026d0f67f4f85662b0ad8390e
+oid sha256:d51ba13d8ecce76d2ed0ab5874b7250d2ee372ca336a46f1c56ac6d02ec40073
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:13c6fd8dd9ac5f94a3fddc347a180544a422ef0ac785334b57fd9dde62a57e91
+oid sha256:8c9e9f1ae41fa1e5ee80b76c39d1d8aae5af530178bf78fd25d0ba3cffdc224c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.001041128285596362,
+  "epoch": 0.0011898608978244138,
   "eval_steps": 2000,
-  "global_step": 1400,
+  "global_step": 1600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -56,6 +56,13 @@
       "learning_rate": 1.9999997884752155e-05,
       "loss": 1.6211,
       "step": 1400
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 4.068266868591309,
+      "learning_rate": 1.9999997233256404e-05,
+      "loss": 1.6001,
+      "step": 1600
     }
   ],
   "logging_steps": 200,
@@ -63,7 +70,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 1.8055337626484736e+16,
+  "total_flos": 2.07320002916352e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null