Hanzalwi committed
Commit f85a02f (1 parent: f88a7ec)

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:280f24b69ed2f388e06fe78ab19f755b372c01f593abbb544fff93efa4098e07
+oid sha256:6ae05965162aa06eb88bf36cda7caef25229591c906e5d97e19d64b11ccaaba5
 size 6304192
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67b6a63014dfc8220d6c4ebd78976e4d61bf5ca40b12c9d8617dfaedb7bf006c
+oid sha256:e1de161225d142d6a89e02265ba2f3f35155c52ceb5f2308b745a95d2890ece9
 size 12639429
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:926d0f02ed35ff822c8351520aea564c896c73584b204d9f89321ceacb12846d
+oid sha256:354336e3d2096475e2dbe89576e77c5e214cdcb69bb19ba1dc01b08c50a83b5a
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5efc416a6883409dd7ab6f5c779e107c7c2baa7af6e12ed9fbd9dd73b8b20784
+oid sha256:9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d
 size 627
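
Note: the four checkpoint artifacts above are stored via Git LFS, so the diff only shows each pointer file (spec version, sha256 oid, byte size) being updated, not the binary contents. As a minimal sketch that is not part of this repository, the hypothetical helper below checks a locally downloaded blob against the oid and size recorded in its pointer, using the new adapter_model.safetensors values from this commit:

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file has the size and sha256 digest recorded in an LFS pointer."""
    path = Path(blob_path)
    if path.stat().st_size != expected_size:
        return False
    # Reading the whole file is fine for a small adapter (~6 MB here).
    digest = hashlib.sha256(path.read_bytes()).hexdigest()
    return digest == expected_oid

# Values copied from the updated adapter_model.safetensors pointer above.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    expected_oid="6ae05965162aa06eb88bf36cda7caef25229591c906e5d97e19d64b11ccaaba5",
    expected_size=6304192,
))
```
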
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.2894083261489868,
-  "best_model_checkpoint": "./outputs/checkpoint-500",
-  "epoch": 0.6666666666666666,
+  "best_metric": 1.2811017036437988,
+  "best_model_checkpoint": "./outputs/checkpoint-600",
+  "epoch": 0.8,
   "eval_steps": 100,
-  "global_step": 500,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -77,6 +77,20 @@
       "eval_samples_per_second": 15.261,
       "eval_steps_per_second": 1.914,
       "step": 500
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 0.0002,
+      "loss": 1.1142,
+      "step": 600
+    },
+    {
+      "epoch": 0.8,
+      "eval_loss": 1.2811017036437988,
+      "eval_runtime": 126.9356,
+      "eval_samples_per_second": 15.205,
+      "eval_steps_per_second": 1.906,
+      "step": 600
     }
   ],
   "logging_steps": 100,
@@ -84,7 +98,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 1.7727133699817472e+16,
+  "total_flos": 2.126816116772045e+16,
   "trial_name": null,
   "trial_params": null
 }
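
For context, the fields updated in trainer_state.json are the ones the Hugging Face Trainer tracks between checkpoints (global_step, epoch, best_metric, total_flos, plus one log_history entry per logging/eval step). Below is a minimal sketch of a TrainingArguments configuration consistent with the values recorded above; the actual model, dataset, and remaining arguments are not part of this commit, and anything marked as an assumption is a guess:

```python
from transformers import TrainingArguments

# Sketch only; older transformers versions spell eval_strategy as evaluation_strategy.
args = TrainingArguments(
    output_dir="./outputs",             # "best_model_checkpoint": "./outputs/checkpoint-600"
    num_train_epochs=3,                 # "num_train_epochs": 3
    learning_rate=2e-4,                 # "learning_rate": 0.0002 at step 600
    logging_steps=100,                  # "logging_steps": 100
    eval_strategy="steps",
    eval_steps=100,                     # "eval_steps": 100
    save_steps=100,                     # "save_steps": 100
    load_best_model_at_end=True,        # assumption: implied by best_metric / best_model_checkpoint tracking
    metric_for_best_model="eval_loss",  # best_metric equals eval_loss at step 600
    greater_is_better=False,
    push_to_hub=True,                   # assumption: checkpoints are being uploaded to this repo during training
)
```

With this cadence, step 600 at epoch 0.8 implies 750 optimizer steps per epoch (600 / 0.8), which also matches the previous checkpoint (500 / 0.6666... = 750).
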