mikhail-panzo committed
Commit b33ec30
1 Parent(s): 3d36971

Training in progress, step 1500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec22c3d9bcd6353319973a9b948e015bf2b55e5b016dd4a047ac623b8d75a936
+oid sha256:0b3878b2837c54460760b7689511b1b119ade195a40bcc1bd58d5a2fa1c333cc
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc8ad9821b5b1d9d37cd3a4a43587f5f9ffc47c277706630426b4c035c536e57
+oid sha256:15856761cc33aa3ef4082afef8bcd92b764bb43930646ef8255717cfd74acefa
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a010ad19aaaa343fbfbf6d1ce05dfd1e56fb0ac07188b993fc06e025d81ccc1
+oid sha256:dc65651f7b378ef8fe4da6f30ced75150f8b4133aff002554882ce4b7f48b94e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8210f8e2249c280ac4965b4cc059199e79e619c33beb58bb250012029c0aaadf
+oid sha256:1a94e74103068a5fb8b31be8524087df4f57bd43f07d1db2b9d36084882fdbb5
 size 1064
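
The four files above are stored with Git LFS, so the commit only rewrites their pointer files: a three-line record carrying the spec version, the sha256 object id of the blob, and its byte size. A minimal sketch of how one might check a downloaded blob against such a pointer; the file paths in the usage comment are illustrative and not part of this commit:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against its Git LFS pointer (oid sha256 + size)."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value

    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    # Hash the blob in chunks so large checkpoints do not need to fit in memory.
    digest = hashlib.sha256()
    with blob.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Illustrative usage (paths are assumptions, not recorded in this commit):
# verify_lfs_pointer("model.safetensors.pointer", "model.safetensors")
```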
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.402164101600647,
-  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-1000",
-  "epoch": 1.675392670157068,
+  "best_metric": 0.3853827118873596,
+  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-1500",
+  "epoch": 2.513089005235602,
   "eval_steps": 500,
-  "global_step": 1000,
+  "global_step": 1500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -163,6 +163,84 @@
       "eval_samples_per_second": 31.959,
       "eval_steps_per_second": 3.998,
       "step": 1000
+    },
+    {
+      "epoch": 1.7591623036649215,
+      "grad_norm": 4.97900390625,
+      "learning_rate": 5.245e-05,
+      "loss": 0.4553,
+      "step": 1050
+    },
+    {
+      "epoch": 1.8429319371727748,
+      "grad_norm": 1.9889676570892334,
+      "learning_rate": 5.495e-05,
+      "loss": 0.449,
+      "step": 1100
+    },
+    {
+      "epoch": 1.9267015706806283,
+      "grad_norm": 1.5135546922683716,
+      "learning_rate": 5.745e-05,
+      "loss": 0.4353,
+      "step": 1150
+    },
+    {
+      "epoch": 2.0104712041884816,
+      "grad_norm": 7.610673904418945,
+      "learning_rate": 5.995000000000001e-05,
+      "loss": 0.4311,
+      "step": 1200
+    },
+    {
+      "epoch": 2.094240837696335,
+      "grad_norm": 2.049562454223633,
+      "learning_rate": 6.245000000000001e-05,
+      "loss": 0.4312,
+      "step": 1250
+    },
+    {
+      "epoch": 2.1780104712041886,
+      "grad_norm": 1.4102027416229248,
+      "learning_rate": 6.494999999999999e-05,
+      "loss": 0.4282,
+      "step": 1300
+    },
+    {
+      "epoch": 2.261780104712042,
+      "grad_norm": 1.701119065284729,
+      "learning_rate": 6.745e-05,
+      "loss": 0.4272,
+      "step": 1350
+    },
+    {
+      "epoch": 2.345549738219895,
+      "grad_norm": 2.0149667263031006,
+      "learning_rate": 6.995e-05,
+      "loss": 0.4277,
+      "step": 1400
+    },
+    {
+      "epoch": 2.4293193717277486,
+      "grad_norm": 2.1658883094787598,
+      "learning_rate": 7.245000000000001e-05,
+      "loss": 0.4247,
+      "step": 1450
+    },
+    {
+      "epoch": 2.513089005235602,
+      "grad_norm": 2.6821463108062744,
+      "learning_rate": 7.495e-05,
+      "loss": 0.4169,
+      "step": 1500
+    },
+    {
+      "epoch": 2.513089005235602,
+      "eval_loss": 0.3853827118873596,
+      "eval_runtime": 269.2607,
+      "eval_samples_per_second": 31.527,
+      "eval_steps_per_second": 3.944,
+      "step": 1500
     }
   ],
   "logging_steps": 50,
@@ -182,7 +260,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.791435708710208e+16,
+  "total_flos": 2.686672014814656e+16,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null