pegasus-x-large-book-summary / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9425981873111784,
"global_step": 160,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 6e-05,
"loss": 2.2136,
"step": 2
},
{
"epoch": 0.05,
"learning_rate": 6e-05,
"loss": 2.1745,
"step": 4
},
{
"epoch": 0.07,
"learning_rate": 6e-05,
"loss": 2.2121,
"step": 6
},
{
"epoch": 0.1,
"learning_rate": 6e-05,
"loss": 2.1942,
"step": 8
},
{
"epoch": 0.12,
"learning_rate": 6e-05,
"loss": 2.213,
"step": 10
},
{
"epoch": 0.15,
"learning_rate": 6e-05,
"loss": 2.1835,
"step": 12
},
{
"epoch": 0.17,
"learning_rate": 6e-05,
"loss": 2.2074,
"step": 14
},
{
"epoch": 0.19,
"learning_rate": 6e-05,
"loss": 2.2165,
"step": 16
},
{
"epoch": 0.22,
"learning_rate": 6e-05,
"loss": 2.1942,
"step": 18
},
{
"epoch": 0.24,
"learning_rate": 6e-05,
"loss": 2.1919,
"step": 20
},
{
"epoch": 0.27,
"learning_rate": 6e-05,
"loss": 2.1598,
"step": 22
},
{
"epoch": 0.29,
"learning_rate": 6e-05,
"loss": 2.2082,
"step": 24
},
{
"epoch": 0.31,
"learning_rate": 6e-05,
"loss": 2.1798,
"step": 26
},
{
"epoch": 0.34,
"learning_rate": 6e-05,
"loss": 2.1878,
"step": 28
},
{
"epoch": 0.36,
"learning_rate": 6e-05,
"loss": 2.198,
"step": 30
},
{
"epoch": 0.39,
"learning_rate": 6e-05,
"loss": 2.1458,
"step": 32
},
{
"epoch": 0.41,
"learning_rate": 6e-05,
"loss": 2.1913,
"step": 34
},
{
"epoch": 0.44,
"learning_rate": 6e-05,
"loss": 2.1581,
"step": 36
},
{
"epoch": 0.46,
"learning_rate": 6e-05,
"loss": 2.1761,
"step": 38
},
{
"epoch": 0.48,
"learning_rate": 6e-05,
"loss": 2.1474,
"step": 40
},
{
"epoch": 0.51,
"learning_rate": 6e-05,
"loss": 2.1736,
"step": 42
},
{
"epoch": 0.53,
"learning_rate": 6e-05,
"loss": 2.1988,
"step": 44
},
{
"epoch": 0.56,
"learning_rate": 6e-05,
"loss": 2.1796,
"step": 46
},
{
"epoch": 0.58,
"learning_rate": 6e-05,
"loss": 2.2004,
"step": 48
},
{
"epoch": 0.6,
"learning_rate": 6e-05,
"loss": 2.2067,
"step": 50
},
{
"epoch": 0.63,
"learning_rate": 6e-05,
"loss": 2.1998,
"step": 52
},
{
"epoch": 0.65,
"learning_rate": 6e-05,
"loss": 2.2015,
"step": 54
},
{
"epoch": 0.68,
"learning_rate": 6e-05,
"loss": 2.2152,
"step": 56
},
{
"epoch": 0.7,
"learning_rate": 6e-05,
"loss": 2.1614,
"step": 58
},
{
"epoch": 0.73,
"learning_rate": 6e-05,
"loss": 2.1996,
"step": 60
},
{
"epoch": 0.75,
"learning_rate": 6e-05,
"loss": 2.1909,
"step": 62
},
{
"epoch": 0.77,
"learning_rate": 6e-05,
"loss": 2.208,
"step": 64
},
{
"epoch": 0.8,
"learning_rate": 6e-05,
"loss": 2.1879,
"step": 66
},
{
"epoch": 0.82,
"learning_rate": 6e-05,
"loss": 2.1759,
"step": 68
},
{
"epoch": 0.85,
"learning_rate": 6e-05,
"loss": 2.1351,
"step": 70
},
{
"epoch": 0.87,
"learning_rate": 6e-05,
"loss": 2.1713,
"step": 72
},
{
"epoch": 0.89,
"learning_rate": 6e-05,
"loss": 2.1473,
"step": 74
},
{
"epoch": 0.92,
"learning_rate": 6e-05,
"loss": 2.1646,
"step": 76
},
{
"epoch": 0.94,
"learning_rate": 6e-05,
"loss": 2.1772,
"step": 78
},
{
"epoch": 0.97,
"learning_rate": 6e-05,
"loss": 2.1615,
"step": 80
},
{
"epoch": 0.99,
"learning_rate": 6e-05,
"loss": 2.1634,
"step": 82
},
{
"epoch": 1.02,
"learning_rate": 6e-05,
"loss": 3.03,
"step": 84
},
{
"epoch": 1.05,
"learning_rate": 6e-05,
"loss": 2.1659,
"step": 86
},
{
"epoch": 1.07,
"learning_rate": 6e-05,
"loss": 2.147,
"step": 88
},
{
"epoch": 1.1,
"learning_rate": 6e-05,
"loss": 2.1871,
"step": 90
},
{
"epoch": 1.12,
"learning_rate": 6e-05,
"loss": 2.1916,
"step": 92
},
{
"epoch": 1.15,
"learning_rate": 6e-05,
"loss": 2.1893,
"step": 94
},
{
"epoch": 1.17,
"learning_rate": 6e-05,
"loss": 2.17,
"step": 96
},
{
"epoch": 1.19,
"learning_rate": 6e-05,
"loss": 2.1516,
"step": 98
},
{
"epoch": 1.22,
"learning_rate": 6e-05,
"loss": 2.1433,
"step": 100
},
{
"epoch": 1.24,
"learning_rate": 6e-05,
"loss": 2.1705,
"step": 102
},
{
"epoch": 1.27,
"learning_rate": 6e-05,
"loss": 2.2087,
"step": 104
},
{
"epoch": 1.29,
"learning_rate": 6e-05,
"loss": 2.2392,
"step": 106
},
{
"epoch": 1.31,
"learning_rate": 6e-05,
"loss": 2.221,
"step": 108
},
{
"epoch": 1.34,
"learning_rate": 6e-05,
"loss": 2.1764,
"step": 110
},
{
"epoch": 1.36,
"learning_rate": 6e-05,
"loss": 2.1441,
"step": 112
},
{
"epoch": 1.39,
"learning_rate": 6e-05,
"loss": 2.2088,
"step": 114
},
{
"epoch": 1.41,
"learning_rate": 6e-05,
"loss": 2.137,
"step": 116
},
{
"epoch": 1.44,
"learning_rate": 6e-05,
"loss": 2.143,
"step": 118
},
{
"epoch": 1.46,
"learning_rate": 6e-05,
"loss": 2.1853,
"step": 120
},
{
"epoch": 1.48,
"learning_rate": 6e-05,
"loss": 2.1816,
"step": 122
},
{
"epoch": 1.51,
"learning_rate": 6e-05,
"loss": 2.1805,
"step": 124
},
{
"epoch": 1.53,
"learning_rate": 6e-05,
"loss": 2.2074,
"step": 126
},
{
"epoch": 1.56,
"learning_rate": 6e-05,
"loss": 2.1545,
"step": 128
},
{
"epoch": 1.58,
"learning_rate": 6e-05,
"loss": 2.1655,
"step": 130
},
{
"epoch": 1.6,
"learning_rate": 6e-05,
"loss": 2.1827,
"step": 132
},
{
"epoch": 1.63,
"learning_rate": 6e-05,
"loss": 2.1936,
"step": 134
},
{
"epoch": 1.65,
"learning_rate": 6e-05,
"loss": 2.1696,
"step": 136
},
{
"epoch": 1.68,
"learning_rate": 6e-05,
"loss": 2.2112,
"step": 138
},
{
"epoch": 1.7,
"learning_rate": 6e-05,
"loss": 2.2027,
"step": 140
},
{
"epoch": 1.73,
"learning_rate": 6e-05,
"loss": 2.1224,
"step": 142
},
{
"epoch": 1.75,
"learning_rate": 6e-05,
"loss": 2.2016,
"step": 144
},
{
"epoch": 1.77,
"learning_rate": 6e-05,
"loss": 2.1465,
"step": 146
},
{
"epoch": 1.8,
"learning_rate": 6e-05,
"loss": 2.1658,
"step": 148
},
{
"epoch": 1.82,
"learning_rate": 6e-05,
"loss": 2.188,
"step": 150
},
{
"epoch": 1.85,
"learning_rate": 6e-05,
"loss": 2.1806,
"step": 152
},
{
"epoch": 1.87,
"learning_rate": 6e-05,
"loss": 2.221,
"step": 154
},
{
"epoch": 1.89,
"learning_rate": 6e-05,
"loss": 2.1824,
"step": 156
},
{
"epoch": 1.92,
"learning_rate": 6e-05,
"loss": 2.147,
"step": 158
},
{
"epoch": 1.94,
"learning_rate": 6e-05,
"loss": 2.1873,
"step": 160
}
],
"max_steps": 164,
"num_train_epochs": 2,
"total_flos": 7.131255647638651e+17,
"trial_name": null,
"trial_params": null
}
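
The object above is the training-state file written by the Hugging Face transformers Trainer for this pegasus-x-large-book-summary run: each log_history entry records the epoch, learning rate, loss, and optimizer step at a logging interval, and the trailing fields give the run-level totals (max_steps, num_train_epochs, total_flos). As a minimal sketch of reading it back, assuming the file is saved locally as trainer_state.json (the path is illustrative) and using only the Python standard library:

import json

# Load the Trainer state exported alongside a checkpoint.
# The path is an assumption; point it at the actual file location.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry carries epoch, learning_rate, loss, and step.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

print(f"{len(steps)} logged points over {state['epoch']:.2f} epochs")
print(f"final loss {losses[-1]} at step {steps[-1]} of {state['max_steps']}")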