BobaZooba committed
Commit f36a8c2
1 Parent(s): 89e0ba4

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:328faf494edd10b13e41db154ed048b702c17b9ccbc5c23e546bdc1fe9922c7f
+ oid sha256:53ce65206cf6ca0cf316938d51c6c033e151e1e6ac908945246c9b956f4a10d9
  size 83945296
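
For reference, adapter_model.safetensors holds the updated adapter weights themselves. A minimal sketch for inspecting them locally, assuming the LFS object has been fetched (so the file is the real ~84 MB blob, not the three-line pointer shown above) and that the safetensors and PyTorch packages are installed:

# Sketch: list the tensors stored in the committed adapter file.
from safetensors.torch import load_file

state_dict = load_file("last-checkpoint/adapter_model.safetensors")
for name, tensor in state_dict.items():
    print(name, tuple(tensor.shape), tensor.dtype)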
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:30fca5bb017a17ab092c6dd50cded3c8b8d747efde3211075d3e56b9cb117207
+ oid sha256:059cf83137ad470f03a7369832c1c02c2e711f6619ff7818b16635ee7d869f38
  size 42545748
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:75a202b6ef127b2f370e29b02a695f7e35fad888ae4707bbaedeec3926ad69a8
+ oid sha256:169c733cf7764f1d968254c0dd657bf888f42fc96732223a4c949c1fbe9bd0bd
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9bbe372fc24045f3af45b8a1f0a38bf204533a38daba940e611e0a4525dedf0a
+ oid sha256:93721a0af79aa5eb1e01c404a6e6402b4f90a54ab1a62e1b2ab8d44938b03bd3
  size 1064
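
Each of the CHANGED entries above is just a Git LFS pointer: the repository tracks the three-line pointer (version, oid, size) while the actual blob lives in LFS storage, so the diff only swaps the sha256 oid. A quick way to confirm a downloaded blob matches its pointer is to hash it; a sketch with illustrative paths, using the new adapter oid from this commit:

# Sketch: confirm a fetched LFS blob matches the sha256 oid in its pointer.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "53ce65206cf6ca0cf316938d51c6c033e151e1e6ac908945246c9b956f4a10d9"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)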
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0018656716417910447,
+ "epoch": 0.0024875621890547263,
  "eval_steps": 1000,
- "global_step": 75,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -457,13 +457,163 @@
  "learning_rate": 5.894736842105263e-05,
  "loss": 1.5599,
  "step": 75
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 5.68421052631579e-05,
+ "loss": 1.3117,
+ "step": 76
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 5.4736842105263165e-05,
+ "loss": 1.5121,
+ "step": 77
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 5.2631578947368424e-05,
+ "loss": 1.6346,
+ "step": 78
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 5.052631578947369e-05,
+ "loss": 1.5391,
+ "step": 79
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 4.842105263157895e-05,
+ "loss": 1.6792,
+ "step": 80
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 4.6315789473684214e-05,
+ "loss": 1.6389,
+ "step": 81
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 4.421052631578947e-05,
+ "loss": 1.6191,
+ "step": 82
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 4.210526315789474e-05,
+ "loss": 1.6297,
+ "step": 83
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 4e-05,
+ "loss": 1.5525,
+ "step": 84
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 3.789473684210527e-05,
+ "loss": 1.4816,
+ "step": 85
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 3.578947368421053e-05,
+ "loss": 1.5453,
+ "step": 86
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 3.368421052631579e-05,
+ "loss": 1.7132,
+ "step": 87
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 3.157894736842105e-05,
+ "loss": 1.7889,
+ "step": 88
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 2.9473684210526314e-05,
+ "loss": 1.3749,
+ "step": 89
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 2.7368421052631583e-05,
+ "loss": 1.337,
+ "step": 90
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 2.5263157894736845e-05,
+ "loss": 1.4863,
+ "step": 91
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 2.3157894736842107e-05,
+ "loss": 1.4914,
+ "step": 92
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 2.105263157894737e-05,
+ "loss": 1.3957,
+ "step": 93
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.8947368421052634e-05,
+ "loss": 1.4742,
+ "step": 94
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.6842105263157896e-05,
+ "loss": 1.6525,
+ "step": 95
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.4736842105263157e-05,
+ "loss": 1.6347,
+ "step": 96
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.2631578947368422e-05,
+ "loss": 1.5477,
+ "step": 97
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.0526315789473684e-05,
+ "loss": 1.318,
+ "step": 98
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 8.421052631578948e-06,
+ "loss": 1.8458,
+ "step": 99
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.315789473684211e-06,
+ "loss": 2.0586,
+ "step": 100
  }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_train_epochs": 1,
  "save_steps": 25,
- "total_flos": 4329984398917632.0,
+ "total_flos": 5587576318083072.0,
  "trial_name": null,
  "trial_params": null
  }
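
The trainer_state.json diff appends the log entries for steps 76 through 100 and updates epoch, global_step, and total_flos accordingly. A minimal sketch, assuming the checkpoint directory is downloaded locally as last-checkpoint/ and follows the standard Hugging Face Trainer layout, for reading that log history back out:

# Sketch: inspect the training log stored in this checkpoint.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])      # 100, ~0.00249
for entry in state["log_history"][-5:]:          # last few logged steps
    print(entry["step"], entry["loss"], entry["learning_rate"])

Resuming the run is then a matter of pointing the same Trainer setup at the directory, e.g. trainer.train(resume_from_checkpoint="last-checkpoint").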