Hanzalwi committed on
Commit
e7d91d8
1 Parent(s): 5486fb4

Training in progress, step 1800, checkpoint

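This checkpoint is the periodic save emitted by the 🤗 Transformers `Trainer` every 100 steps (`save_steps: 100` in the `trainer_state.json` diff below). As a minimal, hedged sketch of how such a `last-checkpoint/` directory is typically resumed (the model, the datasets, and every `TrainingArguments` value not echoed from this diff are placeholders, not taken from this repository):

```python
# Sketch only: `model`, `train_ds`, and `eval_ds` are placeholders that would
# come from this project's own setup; they are not defined by this commit.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="./outputs",   # matches "best_model_checkpoint": "./outputs/checkpoint-1700"
    num_train_epochs=3,       # matches "num_train_epochs": 3
    save_steps=100,           # matches "save_steps": 100
    eval_steps=100,           # matches "eval_steps": 100
    logging_steps=100,        # matches "logging_steps": 100
)

trainer = Trainer(model=model, args=args,
                  train_dataset=train_ds, eval_dataset=eval_ds)

# Restores the adapter weights plus optimizer.pt, scheduler.pt, rng_state.pth
# and trainer_state.json, so training continues from global_step 1800.
trainer.train(resume_from_checkpoint="last-checkpoint")
```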
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b82a33ed7a688788e789d3d454ef4700ca56ef8936e03f3bf2c3067a2780c87f
+oid sha256:232b1f56f6ebf344f5c012b7b3e9af10382c003ca7cd01523fe1aaba0c988ec7
 size 12595704
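The binary checkpoint artifacts are stored as Git LFS pointers, so each diff only swaps the sha256 `oid` of the payload while the pointer format (`version` / `oid` / `size`) stays fixed. A small sketch, assuming the actual payload has been pulled into a local clone, for checking a downloaded file against its pointer:

```python
import hashlib

def lfs_oid_and_size(path: str, chunk_size: int = 1 << 20):
    """Return (sha256 hex digest, byte size) for a downloaded LFS payload."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest(), size

oid, size = lfs_oid_and_size("last-checkpoint/adapter_model.safetensors")
# After this commit the pointer declares:
assert oid == "232b1f56f6ebf344f5c012b7b3e9af10382c003ca7cd01523fe1aaba0c988ec7"
assert size == 12595704
```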
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1376fb8ed66e9174764b45f00a345809bfd0a927539f1c78adc293f22130c6da
+oid sha256:0aef14976ac039f712ae953d0ffd5bca65180c1336d98f5b5fb0d60faf6ff01c
 size 25222341
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28bd09398ec9af0456a9ee14166cd961943ffc74000ebb4c1108e377da2d32df
+oid sha256:b0ed68540abf53877f31d72dfb49b5e75b15e4cc70f9158d4c016264864ea60a
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0410ad60e6d0138b921a03b0a3e367fd27c6ab07cb9a5006fcb66ea8e5bbacc4
+oid sha256:c0e0a1736a46fd1627af3c246e44261aaac909256abbd413b5ee5c968f6b2d8e
 size 627
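`optimizer.pt`, `scheduler.pt`, and `rng_state.pth` are the optimizer state, LR-scheduler state, and RNG snapshot that `Trainer` writes next to the adapter weights so a resumed run picks up exactly where it stopped. A hedged sketch for inspecting them locally, assuming a clone with the payloads present and that the files are trusted (they are `torch.save` artifacts):

```python
import torch

# Trusted local files, so weights_only=False is acceptable here; newer torch
# versions default to weights_only=True, which can reject the RNG snapshot.
optim_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
print(optim_state["param_groups"][0]["lr"])   # current learning rate

sched_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
print(sched_state)                            # only 627 bytes: a handful of counters

rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)
print(sorted(rng_state))                      # e.g. python / numpy / cpu / cuda states
```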
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 1.0762122869491577,
   "best_model_checkpoint": "./outputs/checkpoint-1700",
-  "epoch": 2.2666666666666666,
+  "epoch": 2.4,
   "eval_steps": 100,
-  "global_step": 1700,
+  "global_step": 1800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -245,6 +245,20 @@
       "eval_samples_per_second": 9.412,
       "eval_steps_per_second": 1.18,
       "step": 1700
+    },
+    {
+      "epoch": 2.4,
+      "learning_rate": 0.0002,
+      "loss": 0.9245,
+      "step": 1800
+    },
+    {
+      "epoch": 2.4,
+      "eval_loss": 1.077301263809204,
+      "eval_runtime": 356.8145,
+      "eval_samples_per_second": 5.409,
+      "eval_steps_per_second": 0.678,
+      "step": 1800
     }
   ],
   "logging_steps": 100,
@@ -252,7 +266,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2.403652458643538e+17,
+  "total_flos": 2.545114966391931e+17,
   "trial_name": null,
   "trial_params": null
 }
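The updated `trainer_state.json` is internally consistent: `epoch` 2.4 at `global_step` 1800 implies 1800 / 2.4 = 750 optimizer steps per epoch, which also reproduces the replaced value 2.2666… at step 1700, and the new `eval_loss` (1.0773) does not improve on `best_metric` (1.0762), so `best_model_checkpoint` stays at `./outputs/checkpoint-1700`. A quick check using only numbers visible in this diff:

```python
# Consistency checks over the values shown in this diff (no other data assumed).
steps_per_epoch = 1800 / 2.4                       # -> 750.0
assert abs(1700 / steps_per_epoch - 2.2666666666666666) < 1e-12

# eval_loss at step 1800 is higher (worse) than the stored best_metric,
# so best_model_checkpoint remains ./outputs/checkpoint-1700.
assert 1.077301263809204 > 1.0762122869491577

# total_flos grew by the FLOs spent on the 100 steps between 1700 and 1800.
delta_flos = 2.545114966391931e+17 - 2.403652458643538e+17
print(f"{delta_flos:.3e} FLOs over steps 1700-1800")   # ≈ 1.415e+16
```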