Hanzalwi committed
Commit 34dddc4 · 1 Parent(s): e409bde

Training in progress, step 1800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:96c2643c948e8c26b7915122497df4ae83b2431b99ef269d2d4d0a8ef6631cdf
+oid sha256:1be781d2ff87c04388aa549d471acb1e75cc1efe382632ff5dc00d5c1ea625ec
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ea28045dbb6354fdb95725f985f2e38a5208180a9cae044b1a856afbe37f579
+oid sha256:6ad46c9cf2c24c36d0e90811f32a6a02d31aec3126bc1b48c2d510d7f257d952
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ca9c04f6b7646a0509be2acc7ecfafb1dcb2d23d1ea2918a0e476af152e1e5d
+oid sha256:0909f77b12ea1b25d87482885e6c2cc12d4e855bbbbb4a8ffcede021a58b61fd
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0410ad60e6d0138b921a03b0a3e367fd27c6ab07cb9a5006fcb66ea8e5bbacc4
+oid sha256:c0e0a1736a46fd1627af3c246e44261aaac909256abbd413b5ee5c968f6b2d8e
 size 627
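
Each of the four files above is stored through Git LFS, so the diff only touches the pointer: the sha256 oid changes to reference the new blob while the recorded size stays the same, since only the tensor values were overwritten at step 1800. As a minimal sketch (not part of this repository; file paths below are hypothetical), a pointer in this format can be parsed and a downloaded payload checked against its oid and size:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    # Each pointer line is "key value", e.g. "oid sha256:1be781d2..." or "size 9444296".
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def payload_matches_pointer(pointer_path: str, payload_path: str) -> bool:
    # True if the payload's sha256 digest and byte length match the pointer's oid and size.
    fields = parse_lfs_pointer(Path(pointer_path).read_text())
    expected_digest = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    data = Path(payload_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_digest and len(data) == expected_size

# Hypothetical usage after fetching the real blob with `git lfs pull`:
# payload_matches_pointer("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")
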
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.3168717622756958,
-  "best_model_checkpoint": "./outputs/checkpoint-1700",
-  "epoch": 2.2666666666666666,
+  "best_metric": 1.3101677894592285,
+  "best_model_checkpoint": "./outputs/checkpoint-1800",
+  "epoch": 2.4,
   "eval_steps": 100,
-  "global_step": 1700,
+  "global_step": 1800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -245,6 +245,20 @@
   "eval_samples_per_second": 30.726,
   "eval_steps_per_second": 3.854,
   "step": 1700
+  },
+  {
+   "epoch": 2.4,
+   "learning_rate": 0.0002,
+   "loss": 1.3862,
+   "step": 1800
+  },
+  {
+   "epoch": 2.4,
+   "eval_loss": 1.3101677894592285,
+   "eval_runtime": 47.1712,
+   "eval_samples_per_second": 30.76,
+   "eval_steps_per_second": 3.858,
+   "step": 1800
   }
 ],
 "logging_steps": 100,
@@ -252,7 +266,7 @@
 "num_input_tokens_seen": 0,
 "num_train_epochs": 3,
 "save_steps": 100,
-"total_flos": 5.700115136249856e+16,
+"total_flos": 6.035490006368256e+16,
 "trial_name": null,
 "trial_params": null
 }
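
The trainer_state.json change records why this checkpoint became the new best: eval_loss improved from 1.3169 at step 1700 to 1.3102 at step 1800, so best_model_checkpoint moves to ./outputs/checkpoint-1800 and two new records (one training log, one eval log) are appended to the history. Below is a minimal sketch of reading these fields, assuming the usual Hugging Face TrainerState layout in which the appended records live under "log_history", and assuming a local copy of the file at the path shown (both are assumptions, not confirmed by this diff alone):

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])  # ./outputs/checkpoint-1800
print("best eval_loss:", state["best_metric"])              # 1.3101677894592285
print("global step:", state["global_step"])                 # 1800

# The last two history records are the ones added by this commit:
# a training log (loss 1.3862 at learning_rate 2e-4) and an eval log
# (eval_loss 1.3102), both at step 1800.
for record in state["log_history"][-2:]:
    print(record["step"], record.get("loss", record.get("eval_loss")))

# Sanity check implied by the diff: epoch 2.4 at global_step 1800 gives
# 1800 / 2.4 = 750 optimizer steps per epoch, consistent with epoch 2.2666... at step 1700.
assert abs(state["global_step"] / state["epoch"] - 750.0) < 1e-6
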