Training in progress, epoch 19, checkpoint
last-checkpoint/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ca9c0fa45ca737106eab6be7425caef617a753946ed0508c1aef1f3a4291004d
 size 1227009528
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:acbe2b1c3985ee1c5128707c924ffa1b789c4d55f3b36a9e00d5043900ec85eb
 size 2454133690
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9f0baebfc08807f25c4a6326e1681bcfbdd8c24e4c42d43ef5df074269e679b0
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0854ef6140ae5c2ba277188e63d5e8a4b0a0fea517aba028586326cadbf26b4b
 size 1064
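For completeness, a minimal sketch (assuming the repository has been cloned and the LFS objects pulled; the path below is illustrative) of how one might check that a downloaded checkpoint file matches the sha256 oid recorded in these pointers:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so multi-GB checkpoints need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the model.safetensors pointer in this commit
expected = "ca9c0fa45ca737106eab6be7425caef617a753946ed0508c1aef1f3a4291004d"
actual = sha256_of("last-checkpoint/model.safetensors")  # illustrative local path
print("match" if actual == expected else "mismatch: " + actual)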
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 34.54485321044922,
   "best_model_checkpoint": "/kaggle/working/output/checkpoint-20880",
-  "epoch":
+  "epoch": 19.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 24795,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1789,6 +1789,105 @@
       "eval_samples_per_second": 26.492,
       "eval_steps_per_second": 3.329,
       "step": 23490
+    },
+    {
+      "epoch": 18.007662835249043,
+      "grad_norm": 2.500631809234619,
+      "learning_rate": 3.8750478927203065e-05,
+      "loss": 33.3219,
+      "step": 23500
+    },
+    {
+      "epoch": 18.084291187739463,
+      "grad_norm": 3.90655255317688,
+      "learning_rate": 3.870258620689655e-05,
+      "loss": 33.4211,
+      "step": 23600
+    },
+    {
+      "epoch": 18.160919540229884,
+      "grad_norm": 2.702497720718384,
+      "learning_rate": 3.865469348659004e-05,
+      "loss": 33.2414,
+      "step": 23700
+    },
+    {
+      "epoch": 18.237547892720308,
+      "grad_norm": 1.9609768390655518,
+      "learning_rate": 3.8606800766283525e-05,
+      "loss": 34.0671,
+      "step": 23800
+    },
+    {
+      "epoch": 18.314176245210728,
+      "grad_norm": 2.072951316833496,
+      "learning_rate": 3.855890804597702e-05,
+      "loss": 33.6311,
+      "step": 23900
+    },
+    {
+      "epoch": 18.39080459770115,
+      "grad_norm": 3.249264717102051,
+      "learning_rate": 3.85110153256705e-05,
+      "loss": 32.9968,
+      "step": 24000
+    },
+    {
+      "epoch": 18.467432950191572,
+      "grad_norm": 4.439345359802246,
+      "learning_rate": 3.8463122605363986e-05,
+      "loss": 33.1314,
+      "step": 24100
+    },
+    {
+      "epoch": 18.544061302681992,
+      "grad_norm": 3.9109508991241455,
+      "learning_rate": 3.841522988505747e-05,
+      "loss": 33.3908,
+      "step": 24200
+    },
+    {
+      "epoch": 18.620689655172413,
+      "grad_norm": 2.539151668548584,
+      "learning_rate": 3.836733716475096e-05,
+      "loss": 33.5031,
+      "step": 24300
+    },
+    {
+      "epoch": 18.697318007662837,
+      "grad_norm": 2.6246118545532227,
+      "learning_rate": 3.831944444444445e-05,
+      "loss": 33.6923,
+      "step": 24400
+    },
+    {
+      "epoch": 18.773946360153257,
+      "grad_norm": 3.5379223823547363,
+      "learning_rate": 3.8271551724137934e-05,
+      "loss": 32.9198,
+      "step": 24500
+    },
+    {
+      "epoch": 18.850574712643677,
+      "grad_norm": 3.673536539077759,
+      "learning_rate": 3.822365900383142e-05,
+      "loss": 33.5072,
+      "step": 24600
+    },
+    {
+      "epoch": 18.9272030651341,
+      "grad_norm": 3.9377758502960205,
+      "learning_rate": 3.817576628352491e-05,
+      "loss": 32.8486,
+      "step": 24700
+    },
+    {
+      "epoch": 19.0,
+      "eval_loss": 34.617279052734375,
+      "eval_runtime": 49.3115,
+      "eval_samples_per_second": 26.464,
+      "eval_steps_per_second": 3.326,
+      "step": 24795
     }
   ],
   "logging_steps": 100,
@@ -1803,7 +1902,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter":
+        "early_stopping_patience_counter": 3
       }
     },
     "TrainerControl": {
@@ -1817,7 +1916,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.
+  "total_flos": 2.673890715789619e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
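A minimal sketch (illustrative path; the exact callback nesting is an assumption based on the keys visible in the diff above) of how one might read this trainer_state.json to see where training stands relative to early stopping:

import json

with open("last-checkpoint/trainer_state.json") as f:  # illustrative path
    state = json.load(f)

print("epoch:", state["epoch"])                            # 19.0 in this commit
print("global_step:", state["global_step"])                # 24795
print("best_metric:", state["best_metric"])                # 34.54485321044922
print("best checkpoint:", state["best_model_checkpoint"])  # .../checkpoint-20880

# Number of consecutive evaluations without improvement; 3 in this commit.
# The nesting under "stateful_callbacks"/"EarlyStoppingCallback" is assumed
# from the keys shown in the diff, hence the defensive .get() chain.
counter = (
    state.get("stateful_callbacks", {})
         .get("EarlyStoppingCallback", {})
         .get("attributes", {})
         .get("early_stopping_patience_counter")
)
print("early_stopping_patience_counter:", counter)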