joelniklaus committed on
Commit ecead13
1 Parent(s): e01f0f5

Training in progress, step 300000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a578f7551dd077a1741a9d5dcd3bd4333580b2731835a3f015fe22360df3a83b
+oid sha256:0fefed8b102a6936eda181b658c990bc10d4d4a2ae7cb8812a294cc76c69e578
 size 3480942553
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ae9f86b8b89b814acf498e9a03aed893c7c849c7e09de5ee986508668cb6375
+oid sha256:4b934ec2a5563ecac5345895272be6cec213b81ccadce342f703b4ceace89f14
 size 1740493675
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d56a3662a3ebad3c3a7ebbc555dfbf27635a16b7620edcb1f2e45f19fa72d6
+oid sha256:fc399492a534e9b233d434e2e66af66e569edf12791d7ada8e8d194ca2566c94
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7924e9d3f9ed054868d3ddaa60025f26707d231e7eacc5684e8550acfee9e9c0
+oid sha256:4d41c6734c2aef1f60ed0fbc886cbc351448520889799ebfa66c14f8f9e99059
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.25,
-  "global_step": 250000,
+  "epoch": 0.3,
+  "global_step": 300000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1546,11 +1546,319 @@
       "eval_samples_per_second": 45.67,
       "eval_steps_per_second": 0.722,
       "step": 250000
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 8.935525168886262e-05,
+      "loss": 0.9224,
+      "step": 251000
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 8.92530475251784e-05,
+      "loss": 0.9001,
+      "step": 252000
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 8.91504140964553e-05,
+      "loss": 0.9038,
+      "step": 253000
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 8.90473525250761e-05,
+      "loss": 0.9049,
+      "step": 254000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.894386393810563e-05,
+      "loss": 0.9038,
+      "step": 255000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.883994946727849e-05,
+      "loss": 0.9287,
+      "step": 256000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.873561024898668e-05,
+      "loss": 0.936,
+      "step": 257000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.863084742426719e-05,
+      "loss": 0.9456,
+      "step": 258000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.852566213878947e-05,
+      "loss": 0.9164,
+      "step": 259000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.842005554284296e-05,
+      "loss": 0.9266,
+      "step": 260000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.831402879132446e-05,
+      "loss": 0.8984,
+      "step": 261000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.820758304372557e-05,
+      "loss": 0.9258,
+      "step": 262000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.810071946411989e-05,
+      "loss": 0.9244,
+      "step": 263000
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 8.799343922115044e-05,
+      "loss": 0.9394,
+      "step": 264000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.788574348801675e-05,
+      "loss": 0.9374,
+      "step": 265000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.77776334424621e-05,
+      "loss": 0.9406,
+      "step": 266000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.766911026676064e-05,
+      "loss": 0.939,
+      "step": 267000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.756017514770443e-05,
+      "loss": 0.9367,
+      "step": 268000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.745082927659047e-05,
+      "loss": 0.9334,
+      "step": 269000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.73410738492077e-05,
+      "loss": 0.9112,
+      "step": 270000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.723091006582389e-05,
+      "loss": 0.9138,
+      "step": 271000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.71203391311725e-05,
+      "loss": 0.9107,
+      "step": 272000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.700936225443959e-05,
+      "loss": 0.8952,
+      "step": 273000
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 8.689798064925049e-05,
+      "loss": 0.9094,
+      "step": 274000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.678619553365659e-05,
+      "loss": 0.9174,
+      "step": 275000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.6674008130122e-05,
+      "loss": 0.9266,
+      "step": 276000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.656141966551019e-05,
+      "loss": 0.8974,
+      "step": 277000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.644843137107059e-05,
+      "loss": 0.906,
+      "step": 278000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.633504448242505e-05,
+      "loss": 0.8943,
+      "step": 279000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.622126023955446e-05,
+      "loss": 0.8919,
+      "step": 280000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.610707988678503e-05,
+      "loss": 0.9139,
+      "step": 281000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.599250467277483e-05,
+      "loss": 0.9123,
+      "step": 282000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.587753585050004e-05,
+      "loss": 0.9071,
+      "step": 283000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.576217467724128e-05,
+      "loss": 0.8988,
+      "step": 284000
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 8.564642241456986e-05,
+      "loss": 0.8992,
+      "step": 285000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.553028032833397e-05,
+      "loss": 0.8935,
+      "step": 286000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.541374968864487e-05,
+      "loss": 0.8905,
+      "step": 287000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.529683176986295e-05,
+      "loss": 0.8939,
+      "step": 288000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.517952785058385e-05,
+      "loss": 0.887,
+      "step": 289000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.506183921362443e-05,
+      "loss": 0.9061,
+      "step": 290000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.494376714600878e-05,
+      "loss": 0.9229,
+      "step": 291000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.482531293895412e-05,
+      "loss": 0.9109,
+      "step": 292000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.470647788785665e-05,
+      "loss": 0.9066,
+      "step": 293000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.458726329227747e-05,
+      "loss": 0.9235,
+      "step": 294000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 8.44676704559283e-05,
+      "loss": 0.9373,
+      "step": 295000
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 8.434770068665723e-05,
+      "loss": 0.9142,
+      "step": 296000
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 8.422735529643444e-05,
+      "loss": 0.9029,
+      "step": 297000
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 8.410663560133784e-05,
+      "loss": 0.9235,
+      "step": 298000
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 8.398554292153866e-05,
+      "loss": 0.9367,
+      "step": 299000
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 8.386407858128706e-05,
+      "loss": 0.9122,
+      "step": 300000
+    },
+    {
+      "epoch": 0.3,
+      "eval_loss": 0.4920811951160431,
+      "eval_runtime": 74.5828,
+      "eval_samples_per_second": 67.04,
+      "eval_steps_per_second": 1.059,
+      "step": 300000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 1.4917193170944e+19,
+  "total_flos": 1.79006318051328e+19,
   "trial_name": null,
   "trial_params": null
 }
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af22f2655e1ea7ea408bf09878bad5947fafd76806a9e23b3d50364096500316
+oid sha256:c82b4195e2ad665930b30d576207a9ab7565bc09b17aba2630befee0f99fbd36
 size 3439
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ae9f86b8b89b814acf498e9a03aed893c7c849c7e09de5ee986508668cb6375
+oid sha256:4b934ec2a5563ecac5345895272be6cec213b81ccadce342f703b4ceace89f14
 size 1740493675
runs/Feb07_17-13-03_t1v-n-3b560330-w-0/1675790890.3029315/events.out.tfevents.1675790890.t1v-n-3b560330-w-0.60740.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f46ad33886904e0a5733be894af6ddc24d92fab5e1863900a6b6f6ef0512ef2
+size 5479
runs/Feb07_17-13-03_t1v-n-3b560330-w-0/events.out.tfevents.1675790890.t1v-n-3b560330-w-0.60740.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11815001fb11f240e887bf8f21426a09eeaa6873922428edcd70bd9f88b77f3b
+size 12089
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af22f2655e1ea7ea408bf09878bad5947fafd76806a9e23b3d50364096500316
+oid sha256:c82b4195e2ad665930b30d576207a9ab7565bc09b17aba2630befee0f99fbd36
 size 3439