Hanzalwi committed
Commit 7632759
1 Parent(s): a63de5a

Training in progress, step 2200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f2fca0462d584fde43c65c7b1a8433e500b640b3353fdaab1b986837aa1c57e
+oid sha256:1b886c2efef58451467ac8bcbd63c17273e6cfdca636ea55881dae47720aeb3c
 size 25191576
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3aaa6acb1fd5ef798f5bd201cfbbfc408fec3b2be83056659eda5ea3d1f866c1
+oid sha256:8df9e7c7ee5e1b40cf3b9bade9f9f9e81fdb6a6bea0f0cb164255b43c1a15fc1
 size 50444805
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7f905070f344ff5757d2e5f48fcda24ded17f953888429aa8e5791b2828fc54a
3
  size 14575
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8add635ab6b081112ff7d255bb9a21a69af8460dd1de7ddf6c5c5d319e909536
3
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1238b3bfa75e49a19396161e9e7b72ab89cdd1a3f63b51c0ab4d6e8d216c5a5
+oid sha256:f10a983aa914555fea6e5c0db8d7ddbaebbe7e28546c78ee0e93ac76cbc28436
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.9657241106033325,
-  "best_model_checkpoint": "./outputs/checkpoint-2100",
-  "epoch": 2.8,
+  "best_metric": 0.9637255072593689,
+  "best_model_checkpoint": "./outputs/checkpoint-2200",
+  "epoch": 2.9333333333333336,
   "eval_steps": 100,
-  "global_step": 2100,
+  "global_step": 2200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -301,6 +301,20 @@
       "eval_samples_per_second": 5.241,
       "eval_steps_per_second": 0.657,
       "step": 2100
+    },
+    {
+      "epoch": 2.93,
+      "learning_rate": 0.0002,
+      "loss": 0.8358,
+      "step": 2200
+    },
+    {
+      "epoch": 2.93,
+      "eval_loss": 0.9637255072593689,
+      "eval_runtime": 365.4459,
+      "eval_samples_per_second": 5.281,
+      "eval_steps_per_second": 0.662,
+      "step": 2200
     }
   ],
   "logging_steps": 100,
@@ -308,7 +322,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 5.936315915975148e+17,
+  "total_flos": 6.220289728654295e+17,
   "trial_name": null,
   "trial_params": null
 }