Training in progress, epoch 14, checkpoint
0c9260c
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 14.892307692307693,
"eval_steps": 500,
"global_step": 121,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 9.975000000000002e-06,
"loss": 12.7837,
"step": 1
},
{
"epoch": 0.25,
"learning_rate": 9.950000000000001e-06,
"loss": 12.4595,
"step": 2
},
{
"epoch": 0.37,
"learning_rate": 9.925e-06,
"loss": 11.9117,
"step": 3
},
{
"epoch": 0.49,
"learning_rate": 9.9e-06,
"loss": 11.662,
"step": 4
},
{
"epoch": 0.62,
"learning_rate": 9.875000000000001e-06,
"loss": 11.4278,
"step": 5
},
{
"epoch": 0.74,
"learning_rate": 9.85e-06,
"loss": 11.1398,
"step": 6
},
{
"epoch": 0.86,
"learning_rate": 9.825000000000002e-06,
"loss": 10.9135,
"step": 7
},
{
"epoch": 0.98,
"learning_rate": 9.800000000000001e-06,
"loss": 10.8274,
"step": 8
},
{
"epoch": 1.11,
"learning_rate": 9.775e-06,
"loss": 10.5068,
"step": 9
},
{
"epoch": 1.23,
"learning_rate": 9.75e-06,
"loss": 10.3551,
"step": 10
},
{
"epoch": 1.35,
"learning_rate": 9.725000000000001e-06,
"loss": 10.1776,
"step": 11
},
{
"epoch": 1.48,
"learning_rate": 9.7e-06,
"loss": 10.0708,
"step": 12
},
{
"epoch": 1.6,
"learning_rate": 9.675000000000001e-06,
"loss": 9.8703,
"step": 13
},
{
"epoch": 1.72,
"learning_rate": 9.65e-06,
"loss": 9.7534,
"step": 14
},
{
"epoch": 1.85,
"learning_rate": 9.625e-06,
"loss": 9.6094,
"step": 15
},
{
"epoch": 1.97,
"learning_rate": 9.600000000000001e-06,
"loss": 9.3989,
"step": 16
},
{
"epoch": 2.09,
"learning_rate": 9.575e-06,
"loss": 9.2753,
"step": 17
},
{
"epoch": 2.22,
"learning_rate": 9.55e-06,
"loss": 9.138,
"step": 18
},
{
"epoch": 2.34,
"learning_rate": 9.525000000000001e-06,
"loss": 9.0598,
"step": 19
},
{
"epoch": 2.46,
"learning_rate": 9.5e-06,
"loss": 8.8535,
"step": 20
},
{
"epoch": 2.58,
"learning_rate": 9.475000000000002e-06,
"loss": 8.7378,
"step": 21
},
{
"epoch": 2.71,
"learning_rate": 9.450000000000001e-06,
"loss": 8.6082,
"step": 22
},
{
"epoch": 2.83,
"learning_rate": 9.425e-06,
"loss": 8.4904,
"step": 23
},
{
"epoch": 2.95,
"learning_rate": 9.4e-06,
"loss": 8.2918,
"step": 24
},
{
"epoch": 3.08,
"learning_rate": 9.375000000000001e-06,
"loss": 8.2553,
"step": 25
},
{
"epoch": 3.2,
"learning_rate": 9.350000000000002e-06,
"loss": 8.0724,
"step": 26
},
{
"epoch": 3.32,
"learning_rate": 9.325000000000001e-06,
"loss": 8.0029,
"step": 27
},
{
"epoch": 3.45,
"learning_rate": 9.3e-06,
"loss": 7.8227,
"step": 28
},
{
"epoch": 3.57,
"learning_rate": 9.275e-06,
"loss": 7.7003,
"step": 29
},
{
"epoch": 3.69,
"learning_rate": 9.250000000000001e-06,
"loss": 7.5943,
"step": 30
},
{
"epoch": 3.82,
"learning_rate": 9.225e-06,
"loss": 7.5119,
"step": 31
},
{
"epoch": 3.94,
"learning_rate": 9.200000000000002e-06,
"loss": 7.3673,
"step": 32
},
{
"epoch": 4.06,
"learning_rate": 9.175000000000001e-06,
"loss": 7.3051,
"step": 33
},
{
"epoch": 4.18,
"learning_rate": 9.15e-06,
"loss": 7.224,
"step": 34
},
{
"epoch": 4.31,
"learning_rate": 9.125e-06,
"loss": 7.0517,
"step": 35
},
{
"epoch": 4.43,
"learning_rate": 9.100000000000001e-06,
"loss": 7.0021,
"step": 36
},
{
"epoch": 4.55,
"learning_rate": 9.075e-06,
"loss": 6.9407,
"step": 37
},
{
"epoch": 4.68,
"learning_rate": 9.050000000000001e-06,
"loss": 6.7762,
"step": 38
},
{
"epoch": 4.8,
"learning_rate": 9.025e-06,
"loss": 6.7675,
"step": 39
},
{
"epoch": 4.92,
"learning_rate": 9e-06,
"loss": 6.6573,
"step": 40
},
{
"epoch": 5.05,
"learning_rate": 8.975e-06,
"loss": 6.6093,
"step": 41
},
{
"epoch": 5.17,
"learning_rate": 8.95e-06,
"loss": 6.5268,
"step": 42
},
{
"epoch": 5.29,
"learning_rate": 8.925e-06,
"loss": 6.4456,
"step": 43
},
{
"epoch": 5.42,
"learning_rate": 8.900000000000001e-06,
"loss": 6.4134,
"step": 44
},
{
"epoch": 5.54,
"learning_rate": 8.875e-06,
"loss": 6.3414,
"step": 45
},
{
"epoch": 5.66,
"learning_rate": 8.85e-06,
"loss": 6.2924,
"step": 46
},
{
"epoch": 5.78,
"learning_rate": 8.825000000000001e-06,
"loss": 6.2176,
"step": 47
},
{
"epoch": 5.91,
"learning_rate": 8.8e-06,
"loss": 6.1821,
"step": 48
},
{
"epoch": 6.03,
"learning_rate": 8.775e-06,
"loss": 6.1344,
"step": 49
},
{
"epoch": 6.15,
"learning_rate": 8.750000000000001e-06,
"loss": 6.0681,
"step": 50
},
{
"epoch": 6.28,
"learning_rate": 8.725000000000002e-06,
"loss": 6.0286,
"step": 51
},
{
"epoch": 6.4,
"learning_rate": 8.700000000000001e-06,
"loss": 5.9494,
"step": 52
},
{
"epoch": 6.52,
"learning_rate": 8.675e-06,
"loss": 5.9192,
"step": 53
},
{
"epoch": 6.65,
"learning_rate": 8.65e-06,
"loss": 5.8799,
"step": 54
},
{
"epoch": 6.77,
"learning_rate": 8.625000000000001e-06,
"loss": 5.8367,
"step": 55
},
{
"epoch": 6.89,
"learning_rate": 8.6e-06,
"loss": 5.7919,
"step": 56
},
{
"epoch": 7.02,
"learning_rate": 8.575000000000002e-06,
"loss": 5.7692,
"step": 57
},
{
"epoch": 7.14,
"learning_rate": 8.550000000000001e-06,
"loss": 5.7151,
"step": 58
},
{
"epoch": 7.26,
"learning_rate": 8.525e-06,
"loss": 5.6839,
"step": 59
},
{
"epoch": 7.38,
"learning_rate": 8.5e-06,
"loss": 5.6504,
"step": 60
},
{
"epoch": 7.51,
"learning_rate": 8.475000000000001e-06,
"loss": 5.6222,
"step": 61
},
{
"epoch": 7.63,
"learning_rate": 8.45e-06,
"loss": 5.5806,
"step": 62
},
{
"epoch": 7.75,
"learning_rate": 8.425000000000001e-06,
"loss": 5.5354,
"step": 63
},
{
"epoch": 7.88,
"learning_rate": 8.400000000000001e-06,
"loss": 5.4984,
"step": 64
},
{
"epoch": 8.0,
"learning_rate": 8.375e-06,
"loss": 5.4788,
"step": 65
},
{
"epoch": 8.12,
"learning_rate": 8.35e-06,
"loss": 5.4293,
"step": 66
},
{
"epoch": 8.25,
"learning_rate": 8.325e-06,
"loss": 5.4256,
"step": 67
},
{
"epoch": 8.37,
"learning_rate": 8.3e-06,
"loss": 5.3499,
"step": 68
},
{
"epoch": 8.49,
"learning_rate": 8.275000000000001e-06,
"loss": 5.3758,
"step": 69
},
{
"epoch": 8.62,
"learning_rate": 8.25e-06,
"loss": 5.3384,
"step": 70
},
{
"epoch": 8.74,
"learning_rate": 8.225e-06,
"loss": 5.3189,
"step": 71
},
{
"epoch": 8.86,
"learning_rate": 8.2e-06,
"loss": 5.2841,
"step": 72
},
{
"epoch": 8.98,
"learning_rate": 8.175e-06,
"loss": 5.2387,
"step": 73
},
{
"epoch": 9.11,
"learning_rate": 8.15e-06,
"loss": 5.229,
"step": 74
},
{
"epoch": 9.23,
"learning_rate": 8.125000000000001e-06,
"loss": 5.1936,
"step": 75
},
{
"epoch": 9.35,
"learning_rate": 8.1e-06,
"loss": 5.1758,
"step": 76
},
{
"epoch": 9.48,
"learning_rate": 8.075000000000001e-06,
"loss": 5.1688,
"step": 77
},
{
"epoch": 9.6,
"learning_rate": 8.050000000000001e-06,
"loss": 5.1475,
"step": 78
},
{
"epoch": 9.72,
"learning_rate": 8.025e-06,
"loss": 5.1028,
"step": 79
},
{
"epoch": 9.85,
"learning_rate": 8.000000000000001e-06,
"loss": 5.0747,
"step": 80
},
{
"epoch": 9.97,
"learning_rate": 7.975e-06,
"loss": 5.0689,
"step": 81
},
{
"epoch": 10.09,
"learning_rate": 7.950000000000002e-06,
"loss": 5.045,
"step": 82
},
{
"epoch": 10.22,
"learning_rate": 7.925000000000001e-06,
"loss": 5.0361,
"step": 83
},
{
"epoch": 10.34,
"learning_rate": 7.9e-06,
"loss": 5.0273,
"step": 84
},
{
"epoch": 10.46,
"learning_rate": 7.875e-06,
"loss": 4.983,
"step": 85
},
{
"epoch": 10.58,
"learning_rate": 7.850000000000001e-06,
"loss": 4.9598,
"step": 86
},
{
"epoch": 10.71,
"learning_rate": 7.825e-06,
"loss": 4.9646,
"step": 87
},
{
"epoch": 10.83,
"learning_rate": 7.800000000000002e-06,
"loss": 4.9083,
"step": 88
},
{
"epoch": 10.95,
"learning_rate": 7.775000000000001e-06,
"loss": 4.906,
"step": 89
},
{
"epoch": 11.08,
"learning_rate": 7.75e-06,
"loss": 4.9062,
"step": 90
},
{
"epoch": 11.2,
"learning_rate": 7.725e-06,
"loss": 4.8742,
"step": 91
},
{
"epoch": 11.32,
"learning_rate": 7.7e-06,
"loss": 4.8425,
"step": 92
},
{
"epoch": 11.45,
"learning_rate": 7.675e-06,
"loss": 4.8332,
"step": 93
},
{
"epoch": 11.57,
"learning_rate": 7.650000000000001e-06,
"loss": 4.8399,
"step": 94
},
{
"epoch": 11.69,
"learning_rate": 7.625e-06,
"loss": 4.8392,
"step": 95
},
{
"epoch": 11.82,
"learning_rate": 7.600000000000001e-06,
"loss": 4.8131,
"step": 96
},
{
"epoch": 11.94,
"learning_rate": 7.575e-06,
"loss": 4.7643,
"step": 97
},
{
"epoch": 12.06,
"learning_rate": 7.5500000000000006e-06,
"loss": 4.7836,
"step": 98
},
{
"epoch": 12.18,
"learning_rate": 7.525e-06,
"loss": 4.7206,
"step": 99
},
{
"epoch": 12.31,
"learning_rate": 7.500000000000001e-06,
"loss": 4.7083,
"step": 100
},
{
"epoch": 12.43,
"learning_rate": 7.475000000000001e-06,
"loss": 4.7409,
"step": 101
},
{
"epoch": 12.55,
"learning_rate": 7.450000000000001e-06,
"loss": 4.7328,
"step": 102
},
{
"epoch": 12.68,
"learning_rate": 7.425000000000001e-06,
"loss": 4.7065,
"step": 103
},
{
"epoch": 12.8,
"learning_rate": 7.4e-06,
"loss": 4.7223,
"step": 104
},
{
"epoch": 12.92,
"learning_rate": 7.375000000000001e-06,
"loss": 4.6878,
"step": 105
},
{
"epoch": 13.05,
"learning_rate": 7.350000000000001e-06,
"loss": 4.6661,
"step": 106
},
{
"epoch": 13.17,
"learning_rate": 7.325000000000001e-06,
"loss": 4.6621,
"step": 107
},
{
"epoch": 13.29,
"learning_rate": 7.3e-06,
"loss": 4.6306,
"step": 108
},
{
"epoch": 13.42,
"learning_rate": 7.275000000000001e-06,
"loss": 4.6375,
"step": 109
},
{
"epoch": 13.54,
"learning_rate": 7.25e-06,
"loss": 4.6358,
"step": 110
},
{
"epoch": 13.66,
"learning_rate": 7.225000000000001e-06,
"loss": 4.5903,
"step": 111
},
{
"epoch": 13.78,
"learning_rate": 7.2000000000000005e-06,
"loss": 4.6222,
"step": 112
},
{
"epoch": 13.91,
"learning_rate": 7.175000000000001e-06,
"loss": 4.5943,
"step": 113
},
{
"epoch": 14.03,
"learning_rate": 7.15e-06,
"loss": 4.594,
"step": 114
},
{
"epoch": 14.15,
"learning_rate": 7.125e-06,
"loss": 4.5893,
"step": 115
},
{
"epoch": 14.28,
"learning_rate": 7.100000000000001e-06,
"loss": 4.5807,
"step": 116
},
{
"epoch": 14.4,
"learning_rate": 7.075000000000001e-06,
"loss": 4.5864,
"step": 117
},
{
"epoch": 14.52,
"learning_rate": 7.05e-06,
"loss": 4.5499,
"step": 118
},
{
"epoch": 14.65,
"learning_rate": 7.0250000000000005e-06,
"loss": 4.5211,
"step": 119
},
{
"epoch": 14.77,
"learning_rate": 7e-06,
"loss": 4.5026,
"step": 120
},
{
"epoch": 14.89,
"learning_rate": 6.975000000000001e-06,
"loss": 4.5158,
"step": 121
}
],
"logging_steps": 1,
"max_steps": 400,
"num_train_epochs": 50,
"save_steps": 500,
"total_flos": 8095545179504640.0,
"trial_name": null,
"trial_params": null
}
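The file above appears to follow the layout of a Hugging Face Transformers trainer_state.json: log_history holds one record per optimizer step (logging_steps is 1), and the logged learning rates are consistent with a base learning rate of 1e-5 decayed linearly toward zero over max_steps = 400 (e.g. step 121: 1e-5 × (1 − 121/400) = 6.975e-06). Below is a minimal sketch of how such a state file could be inspected; the path "checkpoint-121/trainer_state.json" is a hypothetical example, and the linear-decay check simply re-derives the schedule observed in the log.

import json

# Minimal sketch: load and summarize a trainer_state.json in the format above.
# The path "checkpoint-121/trainer_state.json" is a hypothetical example.
with open("checkpoint-121/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}, epoch={state['epoch']:.2f}")

# log_history holds one record per logged step (logging_steps = 1 here),
# each with epoch, learning_rate, loss, and step.
first, last = state["log_history"][0], state["log_history"][-1]
print(f"loss went from {first['loss']} (step {first['step']}) "
      f"to {last['loss']} (step {last['step']})")

# The logged learning rates match a linear decay from 1e-5 over max_steps.
for entry in state["log_history"]:
    expected = 1e-5 * (1 - entry["step"] / state["max_steps"])
    assert abs(entry["learning_rate"] - expected) < 1e-12

Note that eval_steps and save_steps are both 500 while max_steps is 400, which would explain why no evaluation entries appear in log_history and why best_metric and best_model_checkpoint remain null at this checkpoint.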