kiddothe2b committed
Commit f928948
1 Parent(s): f78ff50

Training in progress, step 350000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07a6b46aa8152383b0a50bd07b07a176c831d5337ddec91d8b5b5aba89b5b543
+oid sha256:e61ee14a21dfb599f1c63f3e0e8686eb244cce7311c2a2b967485c15ff52a3f8
 size 996067161
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2517996d5c4c4163884506060e457650dff4618ff3814b55dc92b5b3c209528
+oid sha256:e1fddf171aaf7fcd5407c99cb8a97835dce9ceccd14ee70ed5b23a17af12fdf3
 size 498046827
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b06f062cec2bd581a5223b4bd92c5f2dcf09d9cd3cfe1d2d70466080b2e9546a
+oid sha256:036eb5eec66f4acc5bf94747270222b9c5a69b86c3f623f9e981140d862a7ab4
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d998162d48f4ee450ef4bf13d9e047fd62295bbe3f16a4efd05adea28896f336
+oid sha256:9fcbaab8f44aa44b57133d3d5819bbf91b8fc92df692100e5583bb214e77939c
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b06f062cec2bd581a5223b4bd92c5f2dcf09d9cd3cfe1d2d70466080b2e9546a
+oid sha256:036eb5eec66f4acc5bf94747270222b9c5a69b86c3f623f9e981140d862a7ab4
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:372875e5805e9be2c35e094a3a6b2332849035b4c4b7dbcec07d8d9b728521d7
+oid sha256:74cd3be6cc3a9e9a714d813672381da6434030e8445f48465040931d56dcaba6
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b06f062cec2bd581a5223b4bd92c5f2dcf09d9cd3cfe1d2d70466080b2e9546a
+oid sha256:673bcbb9d666bc7c835ef864763b55aa80db016b301aae7e39ae8a0cb0efb86a
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:372875e5805e9be2c35e094a3a6b2332849035b4c4b7dbcec07d8d9b728521d7
+oid sha256:74cd3be6cc3a9e9a714d813672381da6434030e8445f48465040931d56dcaba6
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b06f062cec2bd581a5223b4bd92c5f2dcf09d9cd3cfe1d2d70466080b2e9546a
+oid sha256:673bcbb9d666bc7c835ef864763b55aa80db016b301aae7e39ae8a0cb0efb86a
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:305632a8833b49b3167aa1319ddc125408cf66a7d459afebacacaa7cdcedb877
+oid sha256:036eb5eec66f4acc5bf94747270222b9c5a69b86c3f623f9e981140d862a7ab4
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d41c6734c2aef1f60ed0fbc886cbc351448520889799ebfa66c14f8f9e99059
+oid sha256:f8f220426de5a076dbb6f66f54955d3a3fc0acbab10b1bd60cf9472b552bfdca
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.032937,
-  "global_step": 300000,
+  "epoch": 1.08296,
+  "global_step": 350000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1854,11 +1854,319 @@
       "eval_samples_per_second": 492.503,
       "eval_steps_per_second": 1.97,
       "step": 300000
+    },
+    {
+      "epoch": 1.03,
+      "learning_rate": 8.37422439088976e-05,
+      "loss": 0.894,
+      "step": 301000
+    },
+    {
+      "epoch": 1.03,
+      "learning_rate": 8.362004023673474e-05,
+      "loss": 0.8804,
+      "step": 302000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.349746890119826e-05,
+      "loss": 0.8663,
+      "step": 303000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.337453124270863e-05,
+      "loss": 0.8767,
+      "step": 304000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.32512286056924e-05,
+      "loss": 0.8697,
+      "step": 305000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.31275623385675e-05,
+      "loss": 0.876,
+      "step": 306000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.300353379372834e-05,
+      "loss": 0.8752,
+      "step": 307000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.287914432753123e-05,
+      "loss": 0.8711,
+      "step": 308000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.275439530027948e-05,
+      "loss": 0.8526,
+      "step": 309000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.262928807620843e-05,
+      "loss": 0.8794,
+      "step": 310000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.250382402347065e-05,
+      "loss": 0.8671,
+      "step": 311000
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 8.237800451412095e-05,
+      "loss": 0.8772,
+      "step": 312000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.225183092410128e-05,
+      "loss": 0.8786,
+      "step": 313000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.212530463322583e-05,
+      "loss": 0.8704,
+      "step": 314000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.199842702516583e-05,
+      "loss": 0.8801,
+      "step": 315000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.18711994874345e-05,
+      "loss": 0.8728,
+      "step": 316000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.174362341137177e-05,
+      "loss": 0.8628,
+      "step": 317000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.161570019212921e-05,
+      "loss": 0.8591,
+      "step": 318000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.148743122865463e-05,
+      "loss": 0.8793,
+      "step": 319000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.135881792367686e-05,
+      "loss": 0.8958,
+      "step": 320000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.12298616836904e-05,
+      "loss": 0.8768,
+      "step": 321000
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 8.110056391894005e-05,
+      "loss": 0.8882,
+      "step": 322000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.097092604340542e-05,
+      "loss": 0.8954,
+      "step": 323000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.084094947478556e-05,
+      "loss": 0.9074,
+      "step": 324000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.07106356344834e-05,
+      "loss": 0.894,
+      "step": 325000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.057998594759022e-05,
+      "loss": 0.8978,
+      "step": 326000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.044900184287007e-05,
+      "loss": 0.9071,
+      "step": 327000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.031768475274413e-05,
+      "loss": 0.9107,
+      "step": 328000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.018603611327504e-05,
+      "loss": 0.8774,
+      "step": 329000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 8.005405736415126e-05,
+      "loss": 0.8975,
+      "step": 330000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 7.992174994867123e-05,
+      "loss": 0.8813,
+      "step": 331000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 7.978911531372765e-05,
+      "loss": 0.869,
+      "step": 332000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.965615490979163e-05,
+      "loss": 0.8809,
+      "step": 333000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.952287019089685e-05,
+      "loss": 0.8669,
+      "step": 334000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 0.8675,
+      "step": 335000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.925533364208309e-05,
+      "loss": 0.8748,
+      "step": 336000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.912108473790092e-05,
+      "loss": 0.883,
+      "step": 337000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.898651737020166e-05,
+      "loss": 0.8941,
+      "step": 338000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.88516330105925e-05,
+      "loss": 0.8876,
+      "step": 339000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.871643313414718e-05,
+      "loss": 0.8878,
+      "step": 340000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.858091921938988e-05,
+      "loss": 0.8908,
+      "step": 341000
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 7.844509274827907e-05,
+      "loss": 0.8878,
+      "step": 342000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.830895520619128e-05,
+      "loss": 0.8865,
+      "step": 343000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.817250808190483e-05,
+      "loss": 0.8842,
+      "step": 344000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.803575286758364e-05,
+      "loss": 0.8879,
+      "step": 345000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.789869105876083e-05,
+      "loss": 0.8964,
+      "step": 346000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.776132415432234e-05,
+      "loss": 0.8836,
+      "step": 347000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.762365365649067e-05,
+      "loss": 0.8834,
+      "step": 348000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.748568107080832e-05,
+      "loss": 0.8907,
+      "step": 349000
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 7.734740790612136e-05,
+      "loss": 0.8899,
+      "step": 350000
+    },
+    {
+      "epoch": 1.08,
+      "eval_loss": 0.8285813331604004,
+      "eval_runtime": 24.9856,
+      "eval_samples_per_second": 400.23,
+      "eval_steps_per_second": 1.601,
+      "step": 350000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 5.0546812649472e+18,
+  "total_flos": 5.897119717969625e+18,
   "trial_name": null,
   "trial_params": null
 }
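The updated trainer_state.json above records the checkpoint's progress: global_step, epoch, and the per-step training/eval log entries. A minimal sketch for confirming that state locally, assuming the repository is checked out with the last-checkpoint/ layout shown in this commit and that the log entries sit under the standard "log_history" key of the Trainer state file:

import json

# Read the Trainer state saved alongside the checkpoint (path as shown in this commit).
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Overall progress reported by the diff above: step 350000 of 1000000, epoch ~1.08.
print(f"step {state['global_step']} / {state['max_steps']}, epoch {state['epoch']}")

# Most recent log entry (train loss / learning rate, or eval metrics), if present.
history = state.get("log_history", [])
if history:
    print(history[-1])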
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b4ff5810bc5e548a89007cef7a8f26eae082bc23f1a60b2fb29c87071c0fb01
+oid sha256:225de94e6547ae8e2d8e5b9a0e34c719612311b29038a27f9107115c46808f63
 size 3375
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2517996d5c4c4163884506060e457650dff4618ff3814b55dc92b5b3c209528
+oid sha256:e1fddf171aaf7fcd5407c99cb8a97835dce9ceccd14ee70ed5b23a17af12fdf3
 size 498046827
runs/Nov11_11-14-20_t1v-n-088af867-w-0/events.out.tfevents.1668165319.t1v-n-088af867-w-0.346597.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4c444c4e8e903f12d92ca730d51afacf1008416ca703be4c9290ba2b7d54fe2
-size 53355
+oid sha256:5f584155741b1ac1c66f2deec1a86eb4dec91ffb6ede4b340e14a38064332a51
+size 61035
runs/Nov17_09-03-22_t1v-n-088af867-w-0/1668675880.4394102/events.out.tfevents.1668675880.t1v-n-088af867-w-0.91101.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84092474497834ded5ff6a628c5df8f98b1991b6bf66a6e9c229275fb3983810
+size 5419
runs/Nov17_09-03-22_t1v-n-088af867-w-0/events.out.tfevents.1668675880.t1v-n-088af867-w-0.91101.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77d571d78f4fe0979689c970eb58513ff26e9e28f757dd306ffedcec3e98b4b2
+size 3748
runs/Nov21_18-14-51_t1v-n-088af867-w-0/1669054551.8620195/events.out.tfevents.1669054551.t1v-n-088af867-w-0.42571.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eadbd757619674b78bab6f3a98c1ced53a048c7440ccbd25ca0e2ad1b0441cf
+size 5419
runs/Nov21_18-14-51_t1v-n-088af867-w-0/events.out.tfevents.1669054551.t1v-n-088af867-w-0.42571.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b0ad7fd9cf05d0068da00ee4295784721aa3d2aa25d542d74eb8a8ae739909e
3
+ size 12024
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b4ff5810bc5e548a89007cef7a8f26eae082bc23f1a60b2fb29c87071c0fb01
+oid sha256:225de94e6547ae8e2d8e5b9a0e34c719612311b29038a27f9107115c46808f63
 size 3375