Hanzalwi committed
Commit ab154b6
1 Parent(s): a43c0f0

Training in progress, step 1600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:86a48053478d228504313eb4977d636a6e9091b988a8f9dac04cec44b10e1309
+oid sha256:a6cd985980dabccd3e8f1b853973300ade7cc720b5f4448de7f2fc5966008b1f
 size 19669752
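
The weight files in this commit are stored with Git LFS, so the diff only touches the pointer file: a version line, an oid that is the SHA-256 of the underlying object, and its size in bytes (unchanged at 19,669,752 here; only the hash moves). A minimal sketch for checking a downloaded copy against the new pointer, assuming a local path:

```python
# Minimal sketch: verify a downloaded adapter_model.safetensors against the
# LFS pointer above. The oid in the pointer is the SHA-256 of the file contents.
# The local path is an assumption (wherever the checkpoint was downloaded to).
import hashlib

EXPECTED_OID = "a6cd985980dabccd3e8f1b853973300ade7cc720b5f4448de7f2fc5966008b1f"
PATH = "last-checkpoint/adapter_model.safetensors"

digest = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "file does not match the LFS pointer"
print("checksum OK:", digest.hexdigest())
```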
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:30365d6168e85fb5407bc95bc39fae0d4ec71a659eeb7ca370a7cac73687d446
+oid sha256:c88f0d837bce0f8188dc1eec230a27ce022f0fbedfb6728b01561918f0ebe40e
 size 39356997
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5710b632f596fa570df2f2575c76e26fe0b3fdacd4ac49afac18b353777ba28e
+oid sha256:6ff51ad6df91d0e18c7363540ae4bd74313469b3ed151c6ae2ca115845dc21fc
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d397c26ac1ddd7670b9ddd8b909580a771b707ad5b586b657ad627e8bc4e787f
+oid sha256:fcd2587f362188ac4728d4fa6edf8d2b0b6d72db365d49f7b847d4d79e3da09f
 size 627
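
optimizer.pt, scheduler.pt, and rng_state.pth are the auxiliary state the Hugging Face Trainer writes next to the adapter weights so a run can be resumed deterministically (passing the checkpoint directory to Trainer.train(resume_from_checkpoint=...) restores all of them). A small sketch for inspecting these files, assuming the checkpoint directory has been downloaded locally:

```python
# Minimal sketch: peek at the auxiliary checkpoint files with PyTorch.
# Paths assume a local copy of the last-checkpoint/ directory from this commit.
import torch

# weights_only=False because these are full pickled state dicts
# (optimizer/scheduler/RNG state), not plain tensor archives.
optim_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
sched_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(optim_state.keys())  # a torch optimizer state_dict: 'state' and 'param_groups'
print(sched_state)         # LR scheduler state_dict (tiny, 627 bytes on disk here)
print(type(rng_state))     # RNG snapshot used to make resumption reproducible
```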
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.9568483829498291,
-  "best_model_checkpoint": "./outputs/checkpoint-1500",
-  "epoch": 2.0,
+  "best_metric": 0.9542460441589355,
+  "best_model_checkpoint": "./outputs/checkpoint-1600",
+  "epoch": 2.1333333333333333,
   "eval_steps": 100,
-  "global_step": 1500,
+  "global_step": 1600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -217,6 +217,20 @@
       "eval_samples_per_second": 2.641,
       "eval_steps_per_second": 0.331,
       "step": 1500
+    },
+    {
+      "epoch": 2.13,
+      "learning_rate": 0.0002,
+      "loss": 0.7657,
+      "step": 1600
+    },
+    {
+      "epoch": 2.13,
+      "eval_loss": 0.9542460441589355,
+      "eval_runtime": 730.7608,
+      "eval_samples_per_second": 2.641,
+      "eval_steps_per_second": 0.331,
+      "step": 1600
     }
   ],
   "logging_steps": 100,
@@ -224,7 +238,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 3.884951923281101e+17,
+  "total_flos": 4.14342522878976e+17,
   "trial_name": null,
   "trial_params": null
 }
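
The appended log_history entries record a training loss of 0.7657 and an eval_loss of 0.9542460441589355 at step 1600, which also becomes the new best_metric and best_model_checkpoint. Since step 1500 corresponded to epoch 2.0, there are 750 optimizer steps per epoch, so step 1600 is 1600 / 750 ≈ 2.1333 epochs and the 3-epoch run would finish at step 2250. A minimal sketch for pulling the evaluation curve out of a local copy of trainer_state.json (log_history is the standard Trainer field that holds the entries shown in the diff):

```python
# Minimal sketch: read the eval-loss curve from a local copy of
# last-checkpoint/trainer_state.json (Hugging Face Trainer format).
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])                # 0.9542460441589355
print("best checkpoint:", state["best_model_checkpoint"])  # ./outputs/checkpoint-1600

# Evaluation entries carry an "eval_loss" key every eval_steps=100 steps.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>5}  eval_loss {entry['eval_loss']:.4f}")
```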