plip committed
Commit e099446
1 Parent(s): 756fa8d

Training in progress, step 20000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9db25d01d664aab4cb98e082b28d4b9aabe9c3484451098075c4a39951609908
+ oid sha256:7ff30f34c03f4aee5e5fd09a1aacd3d7bdd3d177d4232672590b18570ebd00ab
  size 50044241
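
Note: the changed files in this commit are Git LFS pointer files, not the binaries themselves. Each pointer records only a version line, the SHA-256 oid of the stored blob, and its size in bytes, so updating a checkpoint swaps the oid while the size may stay the same (the optimizer state above remains 50044241 bytes). The following is a minimal sketch, not part of this repository, that assumes a pointer file and its downloaded blob exist locally and checks that they match:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    # Each pointer line has the form "<key> <value>", e.g. "oid sha256:<hex digest>".
    fields = {}
    for line in pointer_text.splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    return fields

def blob_matches_pointer(pointer_path: str, blob_path: str) -> bool:
    # Hypothetical local paths; compare the blob's SHA-256 digest and byte size
    # against the values recorded in the pointer.
    fields = parse_lfs_pointer(Path(pointer_path).read_text())
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size
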
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d4777aef65282d7381884640132b86532faacb12845758f068a2aac0ed0646ae
+ oid sha256:ee26862b665697be023c901e312db258d269e332dc1eb7fb87e516e00bf064a6
  size 25761253
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54025b1e5c20b5516d968c152992bafd1f062d301f6ee50e676cd70067564190
+ oid sha256:81b2c87ce00f88a26045daeef95098b32592be610ef6295c77558bd47e7dfa25
  size 14503
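
The eight files rng_state_0.pth through rng_state_7.pth are the per-process random-number-generator snapshots the Hugging Face Trainer writes, one per rank (which suggests an 8-process run), so that data shuffling and dropout can be restored deterministically when training resumes from this checkpoint. A small sketch for inspecting one of them from a local copy; the exact contents of the saved object are an assumption and may differ across library versions:

import torch

# Hypothetical local path to one of the files changed above.
state = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu")

# The checkpoint is expected to hold a small dict of RNG states
# (e.g. Python, NumPy, CPU and CUDA); print what is actually present
# rather than assuming specific key names.
if isinstance(state, dict):
    for name, value in state.items():
        print(name, type(value).__name__)
else:
    print(type(state).__name__)
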
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:595e6d0c765b4677e7c91d65c2aacefa0d09faec0213d2321b49d411358f597f
+ oid sha256:2fa6f3c4ec253d8129a7481e01148ec46428b7a6eb1631c7fb589fd92c25c12f
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.6127450980392157,
- "global_step": 10000,
+ "epoch": 1.2254901960784315,
+ "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -206,11 +206,211 @@
  "eval_samples_per_second": 807.983,
  "eval_steps_per_second": 12.928,
  "step": 10000
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 0.0005039999999999999,
+ "loss": 0.7369,
+ "step": 10500
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.0005279999999999999,
+ "loss": 0.7369,
+ "step": 11000
+ },
+ {
+ "epoch": 0.67,
+ "eval_loss": 0.9027426838874817,
+ "eval_runtime": 1.2459,
+ "eval_samples_per_second": 802.657,
+ "eval_steps_per_second": 12.843,
+ "step": 11000
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.000552,
+ "loss": 0.7369,
+ "step": 11500
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0005759999999999999,
+ "loss": 0.7368,
+ "step": 12000
+ },
+ {
+ "epoch": 0.74,
+ "eval_loss": 0.9022310972213745,
+ "eval_runtime": 1.2295,
+ "eval_samples_per_second": 813.368,
+ "eval_steps_per_second": 13.014,
+ "step": 12000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0006,
+ "loss": 0.7367,
+ "step": 12500
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.0005999935478721662,
+ "loss": 0.7368,
+ "step": 13000
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 0.8987133502960205,
+ "eval_runtime": 1.2643,
+ "eval_samples_per_second": 790.939,
+ "eval_steps_per_second": 12.655,
+ "step": 13000
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.000599974191770902,
+ "loss": 0.7375,
+ "step": 13500
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 0.0005999419325429058,
+ "loss": 0.7374,
+ "step": 14000
+ },
+ {
+ "epoch": 0.86,
+ "eval_loss": 0.9013972282409668,
+ "eval_runtime": 1.236,
+ "eval_samples_per_second": 809.042,
+ "eval_steps_per_second": 12.945,
+ "step": 14000
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0005998967715993009,
+ "loss": 0.7369,
+ "step": 14500
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0005998387109155732,
+ "loss": 0.7369,
+ "step": 15000
+ },
+ {
+ "epoch": 0.92,
+ "eval_loss": 0.9001522660255432,
+ "eval_runtime": 1.235,
+ "eval_samples_per_second": 809.697,
+ "eval_steps_per_second": 12.955,
+ "step": 15000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.000599767753031485,
+ "loss": 0.737,
+ "step": 15500
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 0.0005996839010509641,
+ "loss": 0.7369,
+ "step": 16000
+ },
+ {
+ "epoch": 0.98,
+ "eval_loss": 0.9001864790916443,
+ "eval_runtime": 1.1685,
+ "eval_samples_per_second": 855.784,
+ "eval_steps_per_second": 13.693,
+ "step": 16000
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.0005995871586419678,
+ "loss": 0.7369,
+ "step": 16500
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.0005994775300363225,
+ "loss": 0.7372,
+ "step": 17000
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 0.9018534421920776,
+ "eval_runtime": 1.1865,
+ "eval_samples_per_second": 842.819,
+ "eval_steps_per_second": 13.485,
+ "step": 17000
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.0005993550200295384,
+ "loss": 0.7371,
+ "step": 17500
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 0.0005992196339806002,
+ "loss": 0.737,
+ "step": 18000
+ },
+ {
+ "epoch": 1.1,
+ "eval_loss": 0.9001176953315735,
+ "eval_runtime": 1.2219,
+ "eval_samples_per_second": 818.375,
+ "eval_steps_per_second": 13.094,
+ "step": 18000
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 0.0005990713778117324,
+ "loss": 0.7369,
+ "step": 18500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0005989102580081398,
+ "loss": 0.737,
+ "step": 19000
+ },
+ {
+ "epoch": 1.16,
+ "eval_loss": 0.9006143808364868,
+ "eval_runtime": 1.2544,
+ "eval_samples_per_second": 797.163,
+ "eval_steps_per_second": 12.755,
+ "step": 19000
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 0.0005987362816177249,
+ "loss": 0.7369,
+ "step": 19500
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 0.0005985494562507783,
+ "loss": 0.7369,
+ "step": 20000
+ },
+ {
+ "epoch": 1.23,
+ "eval_loss": 0.9006676077842712,
+ "eval_runtime": 1.2815,
+ "eval_samples_per_second": 780.312,
+ "eval_steps_per_second": 12.485,
+ "step": 20000
  }
  ],
  "max_steps": 250000,
  "num_train_epochs": 16,
- "total_flos": 1.6016800286580408e+20,
+ "total_flos": 3.203259952279931e+20,
  "trial_name": null,
  "trial_params": null
  }
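
trainer_state.json is the Trainer's running log: this commit advances epoch from roughly 0.61 to 1.23 and global_step from 10000 to 20000, roughly doubles total_flos, and appends one log entry per 500 training steps plus an evaluation every 1000 steps (eval_loss staying near 0.90 while the training loss hovers around 0.737). The logged learning rate climbs to 6e-4 by step 12500 and then decays very slowly, and the epoch values imply about 16,320 optimizer steps per epoch, consistent with max_steps 250000 being reported as num_train_epochs 16. A sketch for reading the appended history from a local copy; it assumes the standard log_history field written by the Hugging Face Trainer:

import json

# Hypothetical local path to the file diffed above.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"], "epoch:", round(state["epoch"], 4))

# Each evaluation entry in the log carries eval_loss next to its step.
for entry in state.get("log_history", []):
    if "eval_loss" in entry:
        print(f"step {entry['step']:>6}  eval_loss {entry['eval_loss']:.4f}")
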
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d4777aef65282d7381884640132b86532faacb12845758f068a2aac0ed0646ae
+ oid sha256:ee26862b665697be023c901e312db258d269e332dc1eb7fb87e516e00bf064a6
  size 25761253