Commit 912cab8 by MohamedAhmedAE (parent: 57c915b)

Training in progress, step 5800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a30737d4c22b4b26e4123c801ceb26aa4e876b6d95f0b62cad4d768ed46f2ce6
+oid sha256:cea91fb1ecf648cc0d9b1c34c231eeb11e493def8af1b24626b09ec699dcc574
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c4b879409f22641359b928d1e77271642a5b4639b676ff813e6ec4fed297a16
+oid sha256:6c0cba11636076b32b316427e9e0cc1651d3ec55ea03d8d1f601d48d89153db6
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12a1c117c19e73bcd669e165d231b727aab93d182a0e0acc0ec7506cb4907381
+oid sha256:98af83b6a2d5ccf06ac67101b7d27db5e186113101b0ca8f3d71a5ca34f809f8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:daecd8e9a7fb8ab89776f0aa83b998d3983e03adcf54b58e801c6e6de09593a8
+oid sha256:8b9669fdeac6f8ea3f9f111ac5b46d4674ad051ccbc5e8083a6ee1a8eb3361c0
 size 1064
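
The four binary files above are stored through Git LFS, so this commit only rewrites their pointer files: each keeps the same byte size and swaps in a new sha256 oid for the updated checkpoint contents. A minimal sketch (an illustration, not part of this commit) of how a locally downloaded copy can be checked against the pointer values in the diff, using the adapter file's new oid as an example:

```python
import hashlib
import os

# Values copied from the adapter_model.safetensors pointer above.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "cea91fb1ecf648cc0d9b1c34c231eeb11e493def8af1b24626b09ec699dcc574"
expected_size = 167832240

# Hash the file in 1 MiB chunks to avoid loading ~168 MB into memory at once.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("local file matches the LFS pointer")
```

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt with their respective oids and sizes.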
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.004164513142385448,
+  "epoch": 0.0043132457546134996,
   "eval_steps": 2000,
-  "global_step": 5600,
+  "global_step": 5800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -203,6 +203,13 @@
       "learning_rate": 1.999996590028264e-05,
       "loss": 1.5651,
       "step": 5600
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 4.262922763824463,
+      "learning_rate": 1.9999963416130326e-05,
+      "loss": 1.6067,
+      "step": 5800
     }
   ],
   "logging_steps": 200,
@@ -210,7 +217,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 7.351449119465472e+16,
+  "total_flos": 7.611114196832256e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null