BobaZooba committed on
Commit daeab07
1 Parent(s): 68183ae

Training in progress, step 75, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f0dc74eb4f1664d79e8ca2ad4c0dcaf65b8ebee3025e61594d69d01fb996247d
+ oid sha256:328faf494edd10b13e41db154ed048b702c17b9ccbc5c23e546bdc1fe9922c7f
  size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2fa94faad5f7c2fac41c1d3489928c35288395f9537c10cdb8f345784281ca99
+ oid sha256:30fca5bb017a17ab092c6dd50cded3c8b8d747efde3211075d3e56b9cb117207
  size 42545748
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:74d3fe4297fd36d6939c92e784b1ab4781c227d97ebf023707e527bcbb1c9fe4
+ oid sha256:75a202b6ef127b2f370e29b02a695f7e35fad888ae4707bbaedeec3926ad69a8
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d102a8b779e588307a5ff1ccea3e1e9ce6254fd84338a3b83b04cad35c2649e7
+ oid sha256:9bbe372fc24045f3af45b8a1f0a38bf204533a38daba940e611e0a4525dedf0a
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0012437810945273632,
+ "epoch": 0.0018656716417910447,
  "eval_steps": 1000,
- "global_step": 50,
+ "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -307,13 +307,163 @@
  "learning_rate": 0.00011157894736842105,
  "loss": 1.647,
  "step": 50
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 0.00010947368421052633,
+ "loss": 1.5989,
+ "step": 51
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 0.00010736842105263158,
+ "loss": 1.5857,
+ "step": 52
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 0.00010526315789473685,
+ "loss": 1.0283,
+ "step": 53
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 0.00010315789473684211,
+ "loss": 1.6175,
+ "step": 54
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 0.00010105263157894738,
+ "loss": 1.6465,
+ "step": 55
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 9.894736842105263e-05,
+ "loss": 1.6387,
+ "step": 56
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 9.68421052631579e-05,
+ "loss": 1.6037,
+ "step": 57
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 9.473684210526316e-05,
+ "loss": 1.7261,
+ "step": 58
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 9.263157894736843e-05,
+ "loss": 1.6225,
+ "step": 59
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 9.052631578947369e-05,
+ "loss": 1.273,
+ "step": 60
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 8.842105263157894e-05,
+ "loss": 1.5125,
+ "step": 61
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 8.631578947368421e-05,
+ "loss": 1.4634,
+ "step": 62
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 8.421052631578948e-05,
+ "loss": 1.5137,
+ "step": 63
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 8.210526315789474e-05,
+ "loss": 1.6608,
+ "step": 64
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 8e-05,
+ "loss": 1.7165,
+ "step": 65
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 7.789473684210526e-05,
+ "loss": 1.4347,
+ "step": 66
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 7.578947368421054e-05,
+ "loss": 1.9672,
+ "step": 67
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 7.368421052631579e-05,
+ "loss": 1.2761,
+ "step": 68
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 7.157894736842105e-05,
+ "loss": 1.6604,
+ "step": 69
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.947368421052632e-05,
+ "loss": 1.3673,
+ "step": 70
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.736842105263159e-05,
+ "loss": 1.6446,
+ "step": 71
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.526315789473685e-05,
+ "loss": 1.6273,
+ "step": 72
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.31578947368421e-05,
+ "loss": 1.657,
+ "step": 73
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.105263157894737e-05,
+ "loss": 1.689,
+ "step": 74
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 5.894736842105263e-05,
+ "loss": 1.5599,
+ "step": 75
  }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_train_epochs": 1,
  "save_steps": 25,
- "total_flos": 2841070876655616.0,
+ "total_flos": 4329984398917632.0,
  "trial_name": null,
  "trial_params": null
  }