Hanzalwi committed
Commit 316451b
1 Parent(s): a7259fa

Training in progress, step 1900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:09a785d95d2d94cdd6f27d57d4bbccdcb3b74011e1b175a74e950c97818e094e
+oid sha256:4c87b5c832471c3238ea317872535c1ff33bd77a07cb2ada2f5973186fe84a0d
 size 25191576
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:22551c53a7459e2bc11a7aeae09b253cde9e650a919c7344fc9349d114af755f
+oid sha256:6bcf079ab7dfd33196ed96a3da814d53564b805c0861f550898c4c88461933f8
 size 50444805
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:670c55e1c37931a635c4dc0ef163d2b6c1b7a374aff2f09fb80d53eeb5e58616
+oid sha256:432b633e5dda27995d4289f9c9b4c7fc095ba96fbfd82e14d7f383c303196238
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0e0a1736a46fd1627af3c246e44261aaac909256abbd413b5ee5c968f6b2d8e
+oid sha256:b2d4b4cadcabd3e17adcc39c43d8312038a1f5c7f53125f288ba4de5ed25396b
 size 627
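
All four files above are stored with Git LFS, so the commit only rewrites the pointer files: `version` names the pointer spec, `oid sha256:` is the hash of the new binary blob, and `size` is its byte count (the sizes are unchanged here; only the contents differ). A minimal sketch, assuming the checkpoint has already been fetched locally (e.g. via `git lfs pull`), of verifying a downloaded file against its pointer:

```python
import hashlib
from pathlib import Path


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a locally downloaded file against its Git LFS pointer (sha256 oid + size)."""
    data = Path(path).read_bytes()
    size_ok = len(data) == expected_size
    hash_ok = hashlib.sha256(data).hexdigest() == expected_oid
    return size_ok and hash_ok


# Example: the new adapter weights from this commit.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "4c87b5c832471c3238ea317872535c1ff33bd77a07cb2ada2f5973186fe84a0d",
    25191576,
))
```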
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.9753542542457581,
-  "best_model_checkpoint": "./outputs/checkpoint-1800",
-  "epoch": 2.4,
+  "best_metric": 0.9726464748382568,
+  "best_model_checkpoint": "./outputs/checkpoint-1900",
+  "epoch": 2.533333333333333,
   "eval_steps": 100,
-  "global_step": 1800,
+  "global_step": 1900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -259,6 +259,20 @@
       "eval_samples_per_second": 5.451,
       "eval_steps_per_second": 0.684,
       "step": 1800
+    },
+    {
+      "epoch": 2.53,
+      "learning_rate": 0.0002,
+      "loss": 0.8292,
+      "step": 1900
+    },
+    {
+      "epoch": 2.53,
+      "eval_loss": 0.9726464748382568,
+      "eval_runtime": 353.9329,
+      "eval_samples_per_second": 5.453,
+      "eval_steps_per_second": 0.684,
+      "step": 1900
     }
   ],
   "logging_steps": 100,
@@ -266,7 +280,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 5.0879453793069466e+17,
+  "total_flos": 5.3702940947828736e+17,
   "trial_name": null,
   "trial_params": null
 }
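
The trainer_state.json hunks show why this checkpoint was written: the step-1900 evaluation improved `eval_loss` from 0.9754 to 0.9726, so `best_metric` and `best_model_checkpoint` advance from checkpoint-1800 to checkpoint-1900, and two new `log_history` entries record the training loss (0.8292) and the evaluation at step 1900. A minimal sketch, assuming the `last-checkpoint` directory from this commit sits next to the script, of reading that state:

```python
import json
from pathlib import Path

# Assumed local path to the checkpoint directory shown in this commit.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print("global_step:", state["global_step"])              # 1900
print("epoch:", state["epoch"])                          # ~2.53 of 3 epochs
print("best_metric (eval_loss):", state["best_metric"])  # 0.9726...
print("best checkpoint:", state["best_model_checkpoint"])

# Most recent evaluation record, appended by this commit.
latest_eval = [e for e in state["log_history"] if "eval_loss" in e][-1]
print("latest eval:", latest_eval)
```

A `transformers` Trainer configured with the same model and arguments can continue from this point via `trainer.train(resume_from_checkpoint="last-checkpoint")`; the `optimizer.pt`, `scheduler.pt`, and `rng_state.pth` files updated above are what make that resumption exact.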