Federic committed on
Commit e060835
Parent: 4002e6f

Training in progress, step 125, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fab3134a73ebaab8fdd80b08e343af89a367abbf983975951d42305e9f4b1a19
+oid sha256:2440a1d3e22839736f91530c2d0b5e782e46dd86e3e53bef2a6625a989fa1466
 size 838904832
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ccb014d43e78a1ac6ee0fc089ed64494033b0ebbb44670a9ee58bb714125b84a
+oid sha256:0d641a4b63d536e0fd9d4977cc8e26c75ac197343f60b2667be917fff151993b
 size 420633876
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8972305b9bf020a4bb38ed00ecea6ce1d19052e69a3ddd5a8bf5840e137d0fb9
+oid sha256:cdbe66e8af9f834d341b8251d0055801aa4003f83845bb327b01afad2c063103
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aee2b620608ef64c9c8f70ed72f3a0e1bf233746a6ec27ad47abebf797bd2580
+oid sha256:efdbfe8676cd24a75fed7e6e38125bb1bb838a4b70c0fbf7469557659d9b1fec
 size 1064
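
All four binaries above are tracked with Git LFS, so each diff only rewrites the three-line pointer file (spec version, content hash, byte size); the sizes are unchanged and only the oid moves to the step-125 blobs. As a minimal sketch (the local path is an assumption, not part of the commit), a downloaded blob can be checked against its pointer's oid:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoint blobs do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the adapter weights from this checkpoint.
digest = sha256_of("last-checkpoint/adapter_model.safetensors")
expected = "2440a1d3e22839736f91530c2d0b5e782e46dd86e3e53bef2a6625a989fa1466"
print("pointer matches blob:", digest == expected)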
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3,
+  "epoch": 0.5,
   "eval_steps": 500,
-  "global_step": 75,
+  "global_step": 125,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -457,13 +457,313 @@
       "learning_rate": 0.0002,
       "loss": 0.531,
       "step": 75
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.4584,
+      "step": 76
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002,
+      "loss": 0.4835,
+      "step": 77
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002,
+      "loss": 0.5493,
+      "step": 78
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.5379,
+      "step": 79
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.5795,
+      "step": 80
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.4421,
+      "step": 81
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.0002,
+      "loss": 0.4947,
+      "step": 82
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.0002,
+      "loss": 0.4997,
+      "step": 83
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.4571,
+      "step": 84
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.3947,
+      "step": 85
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.4796,
+      "step": 86
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 0.3846,
+      "step": 87
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 0.4606,
+      "step": 88
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.3776,
+      "step": 89
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.4627,
+      "step": 90
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.4095,
+      "step": 91
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 0.0002,
+      "loss": 0.4047,
+      "step": 92
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 0.0002,
+      "loss": 0.3684,
+      "step": 93
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.4086,
+      "step": 94
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.358,
+      "step": 95
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.3824,
+      "step": 96
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 0.0002,
+      "loss": 0.3751,
+      "step": 97
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 0.0002,
+      "loss": 0.3897,
+      "step": 98
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.349,
+      "step": 99
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.4674,
+      "step": 100
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.7479,
+      "step": 101
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.0002,
+      "loss": 0.7174,
+      "step": 102
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.0002,
+      "loss": 0.7081,
+      "step": 103
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.6064,
+      "step": 104
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.6217,
+      "step": 105
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.5967,
+      "step": 106
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.5808,
+      "step": 107
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.5926,
+      "step": 108
+    },
+    {
+      "epoch": 0.44,
+      "learning_rate": 0.0002,
+      "loss": 0.5861,
+      "step": 109
+    },
+    {
+      "epoch": 0.44,
+      "learning_rate": 0.0002,
+      "loss": 0.5996,
+      "step": 110
+    },
+    {
+      "epoch": 0.44,
+      "learning_rate": 0.0002,
+      "loss": 0.5933,
+      "step": 111
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.5812,
+      "step": 112
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.5401,
+      "step": 113
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 0.5436,
+      "step": 114
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 0.5415,
+      "step": 115
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 0.5266,
+      "step": 116
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.5048,
+      "step": 117
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.5286,
+      "step": 118
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.5292,
+      "step": 119
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.594,
+      "step": 120
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.5481,
+      "step": 121
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 0.0002,
+      "loss": 0.493,
+      "step": 122
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 0.0002,
+      "loss": 0.4596,
+      "step": 123
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 0.0002,
+      "loss": 0.5249,
+      "step": 124
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 0.0002,
+      "loss": 0.5121,
+      "step": 125
     }
   ],
   "logging_steps": 1,
   "max_steps": 250,
   "num_train_epochs": 1,
   "save_steps": 25,
-  "total_flos": 9338307805470720.0,
+  "total_flos": 1.527345774944256e+16,
   "trial_name": null,
   "trial_params": null
 }
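
The updated trainer_state.json puts the run at global_step 125 of max_steps 250, i.e. epoch 0.5 with num_train_epochs 1, with a loss logged every step and a checkpoint written every save_steps = 25. As a minimal sketch (the local path is an assumption, and the per-step entries are read from the standard "log_history" key, which lies outside the diff hunks shown above), the state file can be inspected to summarize progress:

import json

# Hypothetical local path to the state file shown in the diff above.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

progress = state["global_step"] / state["max_steps"]  # 125 / 250 -> 0.5
recent = [e["loss"] for e in state["log_history"][-25:] if "loss" in e]
print(f"step {state['global_step']}/{state['max_steps']} "
      f"(epoch {state['epoch']}, {progress:.0%} of the run)")
print(f"mean loss over the last {len(recent)} logged steps: {sum(recent) / len(recent):.4f}")

If the run uses the Hugging Face transformers Trainer, which this file layout suggests but the commit does not state, training could later be resumed from this folder with trainer.train(resume_from_checkpoint="last-checkpoint").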