nicolasdupuisroy committed
Commit ef9b1bf
1 Parent(s): 897ec08

Training in progress, epoch 121

.DS_Store ADDED
Binary file (32.8 kB).
 
README.md CHANGED
@@ -24,7 +24,7 @@ model-index:
  metrics:
  - name: Accuracy
    type: accuracy
- value: 0.7153846153846154
+ value: 0.5846153846153846
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -34,8 +34,8 @@ should probably proofread and complete it, then remove this comment. -->

  This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.1003
- - Accuracy: 0.7154
+ - Loss: 2.5403
+ - Accuracy: 0.5846

  ## Model description

@@ -60,7 +60,7 @@ The following hyperparameters were used during training:
  - seed: 1337
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
- - num_epochs: 150.0
+ - num_epochs: 120.0

  ### Training results
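The updated card still describes a standard `transformers` ViT image classifier, so the checkpoint committed here can be loaded with the usual auto classes. A minimal inference sketch, assuming a repo id or local checkpoint directory is supplied as `MODEL_PATH` (the path and image file below are placeholders, not part of this commit):

```python
# Minimal inference sketch for the fine-tuned ViT classifier in this repo.
# MODEL_PATH and example.jpg are placeholders: point them at the Hub repo id
# (or a local clone/checkpoint dir) and at your own image.
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

MODEL_PATH = "path/to/this-repo-or-checkpoint"  # placeholder

processor = AutoImageProcessor.from_pretrained(MODEL_PATH)
model = AutoModelForImageClassification.from_pretrained(MODEL_PATH)

image = Image.open("example.jpg").convert("RGB")  # placeholder image
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits                   # shape: (1, num_labels)
predicted = logits.argmax(-1).item()
print(model.config.id2label[predicted])
```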
 
all_results.json CHANGED
@@ -1,12 +1,12 @@
  {
- "epoch": 150.0,
- "eval_accuracy": 0.7153846153846154,
- "eval_loss": 2.100327730178833,
- "eval_runtime": 2.528,
- "eval_samples_per_second": 51.425,
- "eval_steps_per_second": 0.791,
- "train_loss": 0.2745589065551758,
- "train_runtime": 1386.6967,
- "train_samples_per_second": 56.249,
- "train_steps_per_second": 0.757
+ "epoch": 120.0,
+ "eval_accuracy": 0.5846153846153846,
+ "eval_loss": 2.5402629375457764,
+ "eval_runtime": 2.5184,
+ "eval_samples_per_second": 51.619,
+ "eval_steps_per_second": 0.794,
+ "train_loss": 0.3072653747740246,
+ "train_runtime": 892.0889,
+ "train_samples_per_second": 69.948,
+ "train_steps_per_second": 0.942
  }
eval_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 150.0,
- "eval_accuracy": 0.7153846153846154,
- "eval_loss": 2.100327730178833,
- "eval_runtime": 2.528,
- "eval_samples_per_second": 51.425,
- "eval_steps_per_second": 0.791
+ "epoch": 120.0,
+ "eval_accuracy": 0.5846153846153846,
+ "eval_loss": 2.5402629375457764,
+ "eval_runtime": 2.5184,
+ "eval_samples_per_second": 51.619,
+ "eval_steps_per_second": 0.794
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f3cf5a5801c077654e03d1e4d3c08ef6f8e4aa414bd85c0b781d010a05b521c9
+ oid sha256:e852752758dbcffb10d194303246407fb0e3152eade28c3cc950cef69ed147cc
  size 343377784
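The weight file (like the other binaries in this commit) is stored through Git LFS, so the diff only touches the pointer: the `oid sha256` changes while the size stays at 343377784 bytes. A small sketch, using only the standard library, for checking that a locally downloaded copy matches the pointer shown above (the local file name is an assumption):

```python
# Sketch: verify a downloaded LFS-tracked file against the oid/size in its pointer.
# The expected values below are copied from the pointer in this commit.
import hashlib
from pathlib import Path

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so large weights don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "model.safetensors"  # assumed local copy of the file from this commit
expected_oid = "e852752758dbcffb10d194303246407fb0e3152eade28c3cc950cef69ed147cc"
expected_size = 343377784

assert Path(path).stat().st_size == expected_size, "size mismatch"
assert sha256_of(path) == expected_oid, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")
```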
runs/.DS_Store ADDED
Binary file (6.15 kB).
 
runs/Jan17_20-08-24_d06676088071/events.out.tfevents.1705522127.d06676088071.30248.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:337736ec6d4369c0c807a5806b557302add774f4eb1a24730b77f8448d05612b
- size 19591
+ oid sha256:c0e21286437dafa65d2446bed5a42350562a39927ce8fec1ac417b06e9b81d35
+ size 16994
runs/Jan17_21-31-37_c6ad14a30b7d/events.out.tfevents.1705527135.c6ad14a30b7d.8359.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b21097c39781b4afceb06efff2c95cba66586f50e01533f837a74c9b54bb7cf
+ size 6325
train_results.json CHANGED
@@ -1,7 +1,7 @@
  {
- "epoch": 150.0,
- "train_loss": 0.2745589065551758,
- "train_runtime": 1386.6967,
- "train_samples_per_second": 56.249,
- "train_steps_per_second": 0.757
+ "epoch": 120.0,
+ "train_loss": 0.3072653747740246,
+ "train_runtime": 892.0889,
+ "train_samples_per_second": 69.948,
+ "train_steps_per_second": 0.942
  }
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 2.100327730178833,
- "best_model_checkpoint": "./drive/MyDrive/repositories/torch_example_image-classification/outputs_letter3/checkpoint-1043",
- "epoch": 150.0,
+ "best_metric": 2.5402629375457764,
+ "best_model_checkpoint": "./drive/MyDrive/repositories/torch_example_image-classification/outputs_letter3/checkpoint-840",
+ "epoch": 120.0,
  "eval_steps": 500,
- "global_step": 1050,
+ "global_step": 840,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1609,419 +1609,14 @@
  "train_runtime": 892.0889,
  "train_samples_per_second": 69.948,
  "train_steps_per_second": 0.942
- },
- {
- "epoch": 121.0,
- "eval_accuracy": 0.6307692307692307,
- "eval_loss": 2.5144028663635254,
- "eval_runtime": 2.3105,
- "eval_samples_per_second": 56.264,
- "eval_steps_per_second": 0.866,
- "step": 847
- },
- {
- "epoch": 121.43,
- "learning_rate": 1.980952380952381e-05,
- "loss": 1.654,
- "step": 850
- },
- {
- "epoch": 122.0,
- "eval_accuracy": 0.6153846153846154,
- "eval_loss": 2.498945474624634,
- "eval_runtime": 2.3196,
- "eval_samples_per_second": 56.043,
- "eval_steps_per_second": 0.862,
- "step": 854
- },
- {
- "epoch": 122.86,
- "learning_rate": 1.961904761904762e-05,
- "loss": 1.5863,
- "step": 860
- },
- {
- "epoch": 123.0,
- "eval_accuracy": 0.6230769230769231,
- "eval_loss": 2.476050853729248,
- "eval_runtime": 2.4097,
- "eval_samples_per_second": 53.949,
- "eval_steps_per_second": 0.83,
- "step": 861
- },
- {
- "epoch": 124.0,
- "eval_accuracy": 0.6,
- "eval_loss": 2.4536540508270264,
- "eval_runtime": 2.5779,
- "eval_samples_per_second": 50.428,
- "eval_steps_per_second": 0.776,
- "step": 868
- },
- {
- "epoch": 124.29,
- "learning_rate": 1.942857142857143e-05,
- "loss": 1.5915,
- "step": 870
- },
- {
- "epoch": 125.0,
- "eval_accuracy": 0.6076923076923076,
- "eval_loss": 2.4367971420288086,
- "eval_runtime": 2.4941,
- "eval_samples_per_second": 52.123,
- "eval_steps_per_second": 0.802,
- "step": 875
- },
- {
- "epoch": 125.71,
- "learning_rate": 1.923809523809524e-05,
- "loss": 1.5221,
- "step": 880
- },
- {
- "epoch": 126.0,
- "eval_accuracy": 0.5923076923076923,
- "eval_loss": 2.4394454956054688,
- "eval_runtime": 2.3616,
- "eval_samples_per_second": 55.047,
- "eval_steps_per_second": 0.847,
- "step": 882
- },
- {
- "epoch": 127.0,
- "eval_accuracy": 0.6076923076923076,
- "eval_loss": 2.3990907669067383,
- "eval_runtime": 2.6083,
- "eval_samples_per_second": 49.84,
- "eval_steps_per_second": 0.767,
- "step": 889
- },
- {
- "epoch": 127.14,
- "learning_rate": 1.904761904761905e-05,
- "loss": 1.5231,
- "step": 890
- },
- {
- "epoch": 128.0,
- "eval_accuracy": 0.6153846153846154,
- "eval_loss": 2.3942794799804688,
- "eval_runtime": 2.6252,
- "eval_samples_per_second": 49.519,
- "eval_steps_per_second": 0.762,
- "step": 896
- },
- {
- "epoch": 128.57,
- "learning_rate": 1.885714285714286e-05,
- "loss": 1.4753,
- "step": 900
- },
- {
- "epoch": 129.0,
- "eval_accuracy": 0.6307692307692307,
- "eval_loss": 2.3880045413970947,
- "eval_runtime": 2.4163,
- "eval_samples_per_second": 53.801,
- "eval_steps_per_second": 0.828,
- "step": 903
- },
- {
- "epoch": 130.0,
- "learning_rate": 1.866666666666667e-05,
- "loss": 1.456,
- "step": 910
- },
- {
- "epoch": 130.0,
- "eval_accuracy": 0.6230769230769231,
- "eval_loss": 2.367373466491699,
- "eval_runtime": 2.4274,
- "eval_samples_per_second": 53.556,
- "eval_steps_per_second": 0.824,
- "step": 910
- },
- {
- "epoch": 131.0,
- "eval_accuracy": 0.6,
- "eval_loss": 2.3696095943450928,
- "eval_runtime": 2.3933,
- "eval_samples_per_second": 54.319,
- "eval_steps_per_second": 0.836,
- "step": 917
- },
- {
- "epoch": 131.43,
- "learning_rate": 1.8476190476190478e-05,
- "loss": 1.4353,
- "step": 920
- },
- {
- "epoch": 132.0,
- "eval_accuracy": 0.6153846153846154,
- "eval_loss": 2.3404488563537598,
- "eval_runtime": 2.4129,
- "eval_samples_per_second": 53.877,
- "eval_steps_per_second": 0.829,
- "step": 924
- },
- {
- "epoch": 132.86,
- "learning_rate": 1.8285714285714288e-05,
- "loss": 1.4062,
- "step": 930
- },
- {
- "epoch": 133.0,
- "eval_accuracy": 0.6461538461538462,
- "eval_loss": 2.3368117809295654,
- "eval_runtime": 2.423,
- "eval_samples_per_second": 53.653,
- "eval_steps_per_second": 0.825,
- "step": 931
- },
- {
- "epoch": 134.0,
- "eval_accuracy": 0.6230769230769231,
- "eval_loss": 2.3077619075775146,
- "eval_runtime": 2.4048,
- "eval_samples_per_second": 54.058,
- "eval_steps_per_second": 0.832,
- "step": 938
- },
- {
- "epoch": 134.29,
- "learning_rate": 1.8095238095238097e-05,
- "loss": 1.3738,
- "step": 940
- },
- {
- "epoch": 135.0,
- "eval_accuracy": 0.6307692307692307,
- "eval_loss": 2.3112919330596924,
- "eval_runtime": 2.391,
- "eval_samples_per_second": 54.37,
- "eval_steps_per_second": 0.836,
- "step": 945
- },
- {
- "epoch": 135.71,
- "learning_rate": 1.7904761904761907e-05,
- "loss": 1.3566,
- "step": 950
- },
- {
- "epoch": 136.0,
- "eval_accuracy": 0.6230769230769231,
- "eval_loss": 2.2861361503601074,
- "eval_runtime": 2.4243,
- "eval_samples_per_second": 53.624,
- "eval_steps_per_second": 0.825,
- "step": 952
- },
- {
- "epoch": 137.0,
- "eval_accuracy": 0.6538461538461539,
- "eval_loss": 2.2651076316833496,
- "eval_runtime": 2.4617,
- "eval_samples_per_second": 52.808,
- "eval_steps_per_second": 0.812,
- "step": 959
- },
- {
- "epoch": 137.14,
- "learning_rate": 1.7714285714285717e-05,
- "loss": 1.3463,
- "step": 960
- },
- {
- "epoch": 138.0,
- "eval_accuracy": 0.6615384615384615,
- "eval_loss": 2.2631070613861084,
- "eval_runtime": 2.4282,
- "eval_samples_per_second": 53.537,
- "eval_steps_per_second": 0.824,
- "step": 966
- },
- {
- "epoch": 138.57,
- "learning_rate": 1.7523809523809526e-05,
- "loss": 1.3058,
- "step": 970
- },
- {
- "epoch": 139.0,
- "eval_accuracy": 0.6615384615384615,
- "eval_loss": 2.2603201866149902,
- "eval_runtime": 2.5878,
- "eval_samples_per_second": 50.235,
- "eval_steps_per_second": 0.773,
- "step": 973
- },
- {
- "epoch": 140.0,
- "learning_rate": 1.7333333333333336e-05,
- "loss": 1.2967,
- "step": 980
- },
- {
- "epoch": 140.0,
- "eval_accuracy": 0.6461538461538462,
- "eval_loss": 2.2615151405334473,
- "eval_runtime": 2.5189,
- "eval_samples_per_second": 51.611,
- "eval_steps_per_second": 0.794,
- "step": 980
- },
- {
- "epoch": 141.0,
- "eval_accuracy": 0.6461538461538462,
- "eval_loss": 2.218820810317993,
- "eval_runtime": 2.4255,
- "eval_samples_per_second": 53.597,
- "eval_steps_per_second": 0.825,
- "step": 987
- },
- {
- "epoch": 141.43,
- "learning_rate": 1.7142857142857142e-05,
- "loss": 1.2841,
- "step": 990
- },
- {
- "epoch": 142.0,
- "eval_accuracy": 0.6615384615384615,
- "eval_loss": 2.242035388946533,
- "eval_runtime": 2.4254,
- "eval_samples_per_second": 53.599,
- "eval_steps_per_second": 0.825,
- "step": 994
- },
- {
- "epoch": 142.86,
- "learning_rate": 1.6952380952380955e-05,
- "loss": 1.24,
- "step": 1000
- },
- {
- "epoch": 143.0,
- "eval_accuracy": 0.6307692307692307,
- "eval_loss": 2.226486921310425,
- "eval_runtime": 2.38,
- "eval_samples_per_second": 54.621,
- "eval_steps_per_second": 0.84,
- "step": 1001
- },
- {
- "epoch": 144.0,
- "eval_accuracy": 0.6461538461538462,
- "eval_loss": 2.212893486022949,
- "eval_runtime": 2.5548,
- "eval_samples_per_second": 50.885,
- "eval_steps_per_second": 0.783,
- "step": 1008
- },
- {
- "epoch": 144.29,
- "learning_rate": 1.6761904761904764e-05,
- "loss": 1.2285,
- "step": 1010
- },
- {
- "epoch": 145.0,
- "eval_accuracy": 0.6615384615384615,
- "eval_loss": 2.227419376373291,
- "eval_runtime": 2.4191,
- "eval_samples_per_second": 53.738,
- "eval_steps_per_second": 0.827,
- "step": 1015
- },
- {
- "epoch": 145.71,
- "learning_rate": 1.6571428571428574e-05,
- "loss": 1.2152,
- "step": 1020
- },
- {
- "epoch": 146.0,
- "eval_accuracy": 0.6846153846153846,
- "eval_loss": 2.18461537361145,
- "eval_runtime": 2.34,
- "eval_samples_per_second": 55.556,
- "eval_steps_per_second": 0.855,
- "step": 1022
- },
- {
- "epoch": 147.0,
- "eval_accuracy": 0.6615384615384615,
- "eval_loss": 2.1721718311309814,
- "eval_runtime": 2.5232,
- "eval_samples_per_second": 51.522,
- "eval_steps_per_second": 0.793,
- "step": 1029
- },
- {
- "epoch": 147.14,
- "learning_rate": 1.6380952380952384e-05,
- "loss": 1.187,
- "step": 1030
- },
- {
- "epoch": 148.0,
- "eval_accuracy": 0.6846153846153846,
- "eval_loss": 2.145458936691284,
- "eval_runtime": 2.5242,
- "eval_samples_per_second": 51.501,
- "eval_steps_per_second": 0.792,
- "step": 1036
- },
- {
- "epoch": 148.57,
- "learning_rate": 1.6190476190476193e-05,
- "loss": 1.1794,
- "step": 1040
- },
- {
- "epoch": 149.0,
- "eval_accuracy": 0.7153846153846154,
- "eval_loss": 2.100327730178833,
- "eval_runtime": 2.5331,
- "eval_samples_per_second": 51.321,
- "eval_steps_per_second": 0.79,
- "step": 1043
- },
- {
- "epoch": 150.0,
- "learning_rate": 1.6000000000000003e-05,
- "loss": 1.1654,
- "step": 1050
- },
- {
- "epoch": 150.0,
- "eval_accuracy": 0.7,
- "eval_loss": 2.1009724140167236,
- "eval_runtime": 2.3848,
- "eval_samples_per_second": 54.512,
- "eval_steps_per_second": 0.839,
- "step": 1050
- },
- {
- "epoch": 150.0,
- "step": 1050,
- "total_flos": 6.047083881086976e+18,
- "train_loss": 0.2745589065551758,
- "train_runtime": 1386.6967,
- "train_samples_per_second": 56.249,
- "train_steps_per_second": 0.757
  }
  ],
  "logging_steps": 10,
- "max_steps": 1050,
+ "max_steps": 840,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 150,
+ "num_train_epochs": 120,
  "save_steps": 500,
- "total_flos": 6.047083881086976e+18,
+ "total_flos": 4.837667104869581e+18,
  "train_batch_size": 80,
  "trial_name": null,
  "trial_params": null
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:02b9794f8e1d194ce8b208459063679907a8cc824c4888328da098de85221d6d
- size 4984
+ oid sha256:77b59a4c9ba56852593021933bca328cab101f76dc48bcef223b400e714cb46c
+ size 4856