llama3_truth_model / trainer_state.json
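trainer_state.json is the checkpoint state written by the Hugging Face Trainer for this run: log_history holds one record per logged optimizer step, each with epoch, grad_norm, learning_rate, loss, num_input_tokens_seen, and step. Below is a minimal Python sketch for inspecting it, assuming the file has been downloaded locally; the path "trainer_state.json" is illustrative, not part of this repo's tooling.

import json

# Load the trainer state dumped by transformers.Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
print(f"global_step={state['global_step']}, epoch={state['epoch']:.2f}")

# Sample every 50th logged record; each carries the fields listed above.
# Over the logged range the schedule is consistent with a linear warmup of
# roughly 8.33e-9 per step (e.g. step 120 -> 1.0e-06, step 360 -> 3.0e-06).
for entry in history[::50]:
    print(entry["step"], entry["loss"], entry["learning_rate"])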
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.951768488745981,
"eval_steps": 500,
"global_step": 385,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012861736334405145,
"grad_norm": 359.0199890136719,
"learning_rate": 8.333333333333335e-09,
"loss": 7.8034,
"num_input_tokens_seen": 13440,
"step": 1
},
{
"epoch": 0.02572347266881029,
"grad_norm": 359.18682861328125,
"learning_rate": 1.666666666666667e-08,
"loss": 7.7577,
"num_input_tokens_seen": 26976,
"step": 2
},
{
"epoch": 0.03858520900321544,
"grad_norm": 368.04339599609375,
"learning_rate": 2.5000000000000002e-08,
"loss": 7.8132,
"num_input_tokens_seen": 40800,
"step": 3
},
{
"epoch": 0.05144694533762058,
"grad_norm": 357.04132080078125,
"learning_rate": 3.333333333333334e-08,
"loss": 7.7906,
"num_input_tokens_seen": 54080,
"step": 4
},
{
"epoch": 0.06430868167202572,
"grad_norm": 361.8187255859375,
"learning_rate": 4.166666666666667e-08,
"loss": 7.7763,
"num_input_tokens_seen": 67552,
"step": 5
},
{
"epoch": 0.07717041800643087,
"grad_norm": 368.778076171875,
"learning_rate": 5.0000000000000004e-08,
"loss": 7.7824,
"num_input_tokens_seen": 81600,
"step": 6
},
{
"epoch": 0.09003215434083602,
"grad_norm": 367.5040588378906,
"learning_rate": 5.833333333333334e-08,
"loss": 7.833,
"num_input_tokens_seen": 94720,
"step": 7
},
{
"epoch": 0.10289389067524116,
"grad_norm": 366.2318420410156,
"learning_rate": 6.666666666666668e-08,
"loss": 7.6949,
"num_input_tokens_seen": 107488,
"step": 8
},
{
"epoch": 0.1157556270096463,
"grad_norm": 366.990966796875,
"learning_rate": 7.500000000000001e-08,
"loss": 7.8336,
"num_input_tokens_seen": 120992,
"step": 9
},
{
"epoch": 0.12861736334405144,
"grad_norm": 357.8764953613281,
"learning_rate": 8.333333333333334e-08,
"loss": 7.7282,
"num_input_tokens_seen": 134272,
"step": 10
},
{
"epoch": 0.1414790996784566,
"grad_norm": 363.4705505371094,
"learning_rate": 9.166666666666668e-08,
"loss": 7.6916,
"num_input_tokens_seen": 148096,
"step": 11
},
{
"epoch": 0.15434083601286175,
"grad_norm": 365.1483154296875,
"learning_rate": 1.0000000000000001e-07,
"loss": 7.7333,
"num_input_tokens_seen": 160736,
"step": 12
},
{
"epoch": 0.16720257234726688,
"grad_norm": 353.99609375,
"learning_rate": 1.0833333333333335e-07,
"loss": 7.6017,
"num_input_tokens_seen": 173376,
"step": 13
},
{
"epoch": 0.18006430868167203,
"grad_norm": 357.7409362792969,
"learning_rate": 1.1666666666666668e-07,
"loss": 7.644,
"num_input_tokens_seen": 187136,
"step": 14
},
{
"epoch": 0.19292604501607716,
"grad_norm": 357.3723449707031,
"learning_rate": 1.2500000000000002e-07,
"loss": 7.5965,
"num_input_tokens_seen": 200192,
"step": 15
},
{
"epoch": 0.2057877813504823,
"grad_norm": 352.3059997558594,
"learning_rate": 1.3333333333333336e-07,
"loss": 7.5883,
"num_input_tokens_seen": 213856,
"step": 16
},
{
"epoch": 0.21864951768488747,
"grad_norm": 349.4331970214844,
"learning_rate": 1.4166666666666668e-07,
"loss": 7.2464,
"num_input_tokens_seen": 226944,
"step": 17
},
{
"epoch": 0.2315112540192926,
"grad_norm": 346.9617004394531,
"learning_rate": 1.5000000000000002e-07,
"loss": 7.3133,
"num_input_tokens_seen": 240640,
"step": 18
},
{
"epoch": 0.24437299035369775,
"grad_norm": 344.31512451171875,
"learning_rate": 1.5833333333333336e-07,
"loss": 7.2133,
"num_input_tokens_seen": 253280,
"step": 19
},
{
"epoch": 0.2572347266881029,
"grad_norm": 348.57623291015625,
"learning_rate": 1.6666666666666668e-07,
"loss": 7.2431,
"num_input_tokens_seen": 266400,
"step": 20
},
{
"epoch": 0.27009646302250806,
"grad_norm": 345.67694091796875,
"learning_rate": 1.7500000000000002e-07,
"loss": 7.1875,
"num_input_tokens_seen": 280416,
"step": 21
},
{
"epoch": 0.2829581993569132,
"grad_norm": 336.11968994140625,
"learning_rate": 1.8333333333333336e-07,
"loss": 7.0659,
"num_input_tokens_seen": 294368,
"step": 22
},
{
"epoch": 0.2958199356913183,
"grad_norm": 348.02783203125,
"learning_rate": 1.9166666666666668e-07,
"loss": 6.3595,
"num_input_tokens_seen": 307840,
"step": 23
},
{
"epoch": 0.3086816720257235,
"grad_norm": 306.9109802246094,
"learning_rate": 2.0000000000000002e-07,
"loss": 6.0417,
"num_input_tokens_seen": 322144,
"step": 24
},
{
"epoch": 0.3215434083601286,
"grad_norm": 308.7346496582031,
"learning_rate": 2.0833333333333333e-07,
"loss": 5.9894,
"num_input_tokens_seen": 335424,
"step": 25
},
{
"epoch": 0.33440514469453375,
"grad_norm": 303.3129577636719,
"learning_rate": 2.166666666666667e-07,
"loss": 5.9259,
"num_input_tokens_seen": 349376,
"step": 26
},
{
"epoch": 0.34726688102893893,
"grad_norm": 305.47637939453125,
"learning_rate": 2.2500000000000002e-07,
"loss": 5.8983,
"num_input_tokens_seen": 362944,
"step": 27
},
{
"epoch": 0.36012861736334406,
"grad_norm": 306.3539123535156,
"learning_rate": 2.3333333333333336e-07,
"loss": 5.6848,
"num_input_tokens_seen": 376352,
"step": 28
},
{
"epoch": 0.3729903536977492,
"grad_norm": 300.50201416015625,
"learning_rate": 2.416666666666667e-07,
"loss": 5.5649,
"num_input_tokens_seen": 389312,
"step": 29
},
{
"epoch": 0.3858520900321543,
"grad_norm": 297.82757568359375,
"learning_rate": 2.5000000000000004e-07,
"loss": 5.4642,
"num_input_tokens_seen": 402848,
"step": 30
},
{
"epoch": 0.3987138263665595,
"grad_norm": 348.0853271484375,
"learning_rate": 2.5833333333333333e-07,
"loss": 4.7955,
"num_input_tokens_seen": 416352,
"step": 31
},
{
"epoch": 0.4115755627009646,
"grad_norm": 346.5870361328125,
"learning_rate": 2.666666666666667e-07,
"loss": 2.8339,
"num_input_tokens_seen": 429920,
"step": 32
},
{
"epoch": 0.42443729903536975,
"grad_norm": 327.6314697265625,
"learning_rate": 2.75e-07,
"loss": 2.4477,
"num_input_tokens_seen": 444064,
"step": 33
},
{
"epoch": 0.43729903536977494,
"grad_norm": 321.9542236328125,
"learning_rate": 2.8333333333333336e-07,
"loss": 2.3331,
"num_input_tokens_seen": 458304,
"step": 34
},
{
"epoch": 0.45016077170418006,
"grad_norm": 327.8713073730469,
"learning_rate": 2.916666666666667e-07,
"loss": 2.2143,
"num_input_tokens_seen": 471904,
"step": 35
},
{
"epoch": 0.4630225080385852,
"grad_norm": 317.32427978515625,
"learning_rate": 3.0000000000000004e-07,
"loss": 2.0067,
"num_input_tokens_seen": 484768,
"step": 36
},
{
"epoch": 0.4758842443729904,
"grad_norm": 251.88111877441406,
"learning_rate": 3.083333333333334e-07,
"loss": 1.7702,
"num_input_tokens_seen": 497696,
"step": 37
},
{
"epoch": 0.4887459807073955,
"grad_norm": 273.716064453125,
"learning_rate": 3.166666666666667e-07,
"loss": 1.5557,
"num_input_tokens_seen": 511072,
"step": 38
},
{
"epoch": 0.5016077170418006,
"grad_norm": 293.1105651855469,
"learning_rate": 3.25e-07,
"loss": 1.3024,
"num_input_tokens_seen": 524576,
"step": 39
},
{
"epoch": 0.5144694533762058,
"grad_norm": 243.40711975097656,
"learning_rate": 3.3333333333333335e-07,
"loss": 1.1652,
"num_input_tokens_seen": 538112,
"step": 40
},
{
"epoch": 0.5273311897106109,
"grad_norm": 111.22821807861328,
"learning_rate": 3.416666666666667e-07,
"loss": 0.6839,
"num_input_tokens_seen": 552096,
"step": 41
},
{
"epoch": 0.5401929260450161,
"grad_norm": 104.02100372314453,
"learning_rate": 3.5000000000000004e-07,
"loss": 0.4774,
"num_input_tokens_seen": 565920,
"step": 42
},
{
"epoch": 0.5530546623794212,
"grad_norm": 32.459312438964844,
"learning_rate": 3.583333333333334e-07,
"loss": 0.3841,
"num_input_tokens_seen": 579200,
"step": 43
},
{
"epoch": 0.5659163987138264,
"grad_norm": 15.353242874145508,
"learning_rate": 3.666666666666667e-07,
"loss": 0.3588,
"num_input_tokens_seen": 592864,
"step": 44
},
{
"epoch": 0.5787781350482315,
"grad_norm": 48.64632034301758,
"learning_rate": 3.75e-07,
"loss": 0.3628,
"num_input_tokens_seen": 606656,
"step": 45
},
{
"epoch": 0.5916398713826366,
"grad_norm": 34.02824020385742,
"learning_rate": 3.8333333333333335e-07,
"loss": 0.3426,
"num_input_tokens_seen": 619968,
"step": 46
},
{
"epoch": 0.6045016077170418,
"grad_norm": 15.54571533203125,
"learning_rate": 3.9166666666666675e-07,
"loss": 0.3279,
"num_input_tokens_seen": 633632,
"step": 47
},
{
"epoch": 0.617363344051447,
"grad_norm": 101.66536712646484,
"learning_rate": 4.0000000000000003e-07,
"loss": 0.3947,
"num_input_tokens_seen": 647360,
"step": 48
},
{
"epoch": 0.6302250803858521,
"grad_norm": 39.69755935668945,
"learning_rate": 4.083333333333334e-07,
"loss": 0.3075,
"num_input_tokens_seen": 661504,
"step": 49
},
{
"epoch": 0.6430868167202572,
"grad_norm": 23.356718063354492,
"learning_rate": 4.1666666666666667e-07,
"loss": 0.3236,
"num_input_tokens_seen": 675424,
"step": 50
},
{
"epoch": 0.6559485530546624,
"grad_norm": 74.00253295898438,
"learning_rate": 4.2500000000000006e-07,
"loss": 0.3557,
"num_input_tokens_seen": 688800,
"step": 51
},
{
"epoch": 0.6688102893890675,
"grad_norm": 109.3729248046875,
"learning_rate": 4.333333333333334e-07,
"loss": 0.4008,
"num_input_tokens_seen": 702208,
"step": 52
},
{
"epoch": 0.6816720257234726,
"grad_norm": 81.13125610351562,
"learning_rate": 4.416666666666667e-07,
"loss": 0.3586,
"num_input_tokens_seen": 716096,
"step": 53
},
{
"epoch": 0.6945337620578779,
"grad_norm": 24.126953125,
"learning_rate": 4.5000000000000003e-07,
"loss": 0.3023,
"num_input_tokens_seen": 728672,
"step": 54
},
{
"epoch": 0.707395498392283,
"grad_norm": 77.53913879394531,
"learning_rate": 4.583333333333333e-07,
"loss": 0.3547,
"num_input_tokens_seen": 742912,
"step": 55
},
{
"epoch": 0.7202572347266881,
"grad_norm": 95.46034240722656,
"learning_rate": 4.666666666666667e-07,
"loss": 0.3846,
"num_input_tokens_seen": 755808,
"step": 56
},
{
"epoch": 0.7331189710610932,
"grad_norm": 86.11170959472656,
"learning_rate": 4.7500000000000006e-07,
"loss": 0.3743,
"num_input_tokens_seen": 769696,
"step": 57
},
{
"epoch": 0.7459807073954984,
"grad_norm": 31.460582733154297,
"learning_rate": 4.833333333333334e-07,
"loss": 0.3091,
"num_input_tokens_seen": 783680,
"step": 58
},
{
"epoch": 0.7588424437299035,
"grad_norm": 34.459659576416016,
"learning_rate": 4.916666666666667e-07,
"loss": 0.3094,
"num_input_tokens_seen": 796672,
"step": 59
},
{
"epoch": 0.7717041800643086,
"grad_norm": 51.55731201171875,
"learning_rate": 5.000000000000001e-07,
"loss": 0.3309,
"num_input_tokens_seen": 810304,
"step": 60
},
{
"epoch": 0.7845659163987139,
"grad_norm": 35.633277893066406,
"learning_rate": 5.083333333333334e-07,
"loss": 0.3276,
"num_input_tokens_seen": 824160,
"step": 61
},
{
"epoch": 0.797427652733119,
"grad_norm": 9.230757713317871,
"learning_rate": 5.166666666666667e-07,
"loss": 0.3084,
"num_input_tokens_seen": 837312,
"step": 62
},
{
"epoch": 0.8102893890675241,
"grad_norm": 35.229454040527344,
"learning_rate": 5.250000000000001e-07,
"loss": 0.3182,
"num_input_tokens_seen": 851168,
"step": 63
},
{
"epoch": 0.8231511254019293,
"grad_norm": 47.88737487792969,
"learning_rate": 5.333333333333335e-07,
"loss": 0.3469,
"num_input_tokens_seen": 864096,
"step": 64
},
{
"epoch": 0.8360128617363344,
"grad_norm": 38.481746673583984,
"learning_rate": 5.416666666666667e-07,
"loss": 0.3253,
"num_input_tokens_seen": 878048,
"step": 65
},
{
"epoch": 0.8488745980707395,
"grad_norm": 11.656455993652344,
"learning_rate": 5.5e-07,
"loss": 0.2746,
"num_input_tokens_seen": 891904,
"step": 66
},
{
"epoch": 0.8617363344051447,
"grad_norm": 18.758018493652344,
"learning_rate": 5.583333333333333e-07,
"loss": 0.2893,
"num_input_tokens_seen": 906016,
"step": 67
},
{
"epoch": 0.8745980707395499,
"grad_norm": 19.316091537475586,
"learning_rate": 5.666666666666667e-07,
"loss": 0.2827,
"num_input_tokens_seen": 918944,
"step": 68
},
{
"epoch": 0.887459807073955,
"grad_norm": 32.92877960205078,
"learning_rate": 5.750000000000001e-07,
"loss": 0.2978,
"num_input_tokens_seen": 932672,
"step": 69
},
{
"epoch": 0.9003215434083601,
"grad_norm": 14.833833694458008,
"learning_rate": 5.833333333333334e-07,
"loss": 0.2703,
"num_input_tokens_seen": 945248,
"step": 70
},
{
"epoch": 0.9131832797427653,
"grad_norm": 23.44466209411621,
"learning_rate": 5.916666666666667e-07,
"loss": 0.2968,
"num_input_tokens_seen": 958784,
"step": 71
},
{
"epoch": 0.9260450160771704,
"grad_norm": 56.151981353759766,
"learning_rate": 6.000000000000001e-07,
"loss": 0.3035,
"num_input_tokens_seen": 972416,
"step": 72
},
{
"epoch": 0.9389067524115756,
"grad_norm": 77.62692260742188,
"learning_rate": 6.083333333333334e-07,
"loss": 0.3211,
"num_input_tokens_seen": 985888,
"step": 73
},
{
"epoch": 0.9517684887459807,
"grad_norm": 65.45875549316406,
"learning_rate": 6.166666666666668e-07,
"loss": 0.2913,
"num_input_tokens_seen": 999296,
"step": 74
},
{
"epoch": 0.9646302250803859,
"grad_norm": 16.159313201904297,
"learning_rate": 6.25e-07,
"loss": 0.2817,
"num_input_tokens_seen": 1012640,
"step": 75
},
{
"epoch": 0.977491961414791,
"grad_norm": 33.846290588378906,
"learning_rate": 6.333333333333334e-07,
"loss": 0.2827,
"num_input_tokens_seen": 1026112,
"step": 76
},
{
"epoch": 0.9903536977491961,
"grad_norm": 9.542298316955566,
"learning_rate": 6.416666666666667e-07,
"loss": 0.229,
"num_input_tokens_seen": 1039424,
"step": 77
},
{
"epoch": 1.0032154340836013,
"grad_norm": 10.284690856933594,
"learning_rate": 6.5e-07,
"loss": 0.2503,
"num_input_tokens_seen": 1053408,
"step": 78
},
{
"epoch": 1.0160771704180065,
"grad_norm": 19.490089416503906,
"learning_rate": 6.583333333333333e-07,
"loss": 0.2453,
"num_input_tokens_seen": 1067328,
"step": 79
},
{
"epoch": 1.0289389067524115,
"grad_norm": 18.234272003173828,
"learning_rate": 6.666666666666667e-07,
"loss": 0.2167,
"num_input_tokens_seen": 1080288,
"step": 80
},
{
"epoch": 1.0418006430868167,
"grad_norm": 9.240696907043457,
"learning_rate": 6.750000000000001e-07,
"loss": 0.2361,
"num_input_tokens_seen": 1093952,
"step": 81
},
{
"epoch": 1.0546623794212218,
"grad_norm": 22.255823135375977,
"learning_rate": 6.833333333333334e-07,
"loss": 0.2248,
"num_input_tokens_seen": 1107904,
"step": 82
},
{
"epoch": 1.067524115755627,
"grad_norm": 26.029281616210938,
"learning_rate": 6.916666666666668e-07,
"loss": 0.2491,
"num_input_tokens_seen": 1121120,
"step": 83
},
{
"epoch": 1.0803858520900322,
"grad_norm": 34.11492156982422,
"learning_rate": 7.000000000000001e-07,
"loss": 0.2352,
"num_input_tokens_seen": 1135040,
"step": 84
},
{
"epoch": 1.0932475884244373,
"grad_norm": 65.68599700927734,
"learning_rate": 7.083333333333334e-07,
"loss": 0.2365,
"num_input_tokens_seen": 1148992,
"step": 85
},
{
"epoch": 1.1061093247588425,
"grad_norm": 72.81002807617188,
"learning_rate": 7.166666666666668e-07,
"loss": 0.217,
"num_input_tokens_seen": 1162592,
"step": 86
},
{
"epoch": 1.1189710610932475,
"grad_norm": 21.043798446655273,
"learning_rate": 7.25e-07,
"loss": 0.2258,
"num_input_tokens_seen": 1175104,
"step": 87
},
{
"epoch": 1.1318327974276527,
"grad_norm": 71.2937240600586,
"learning_rate": 7.333333333333334e-07,
"loss": 0.245,
"num_input_tokens_seen": 1189376,
"step": 88
},
{
"epoch": 1.144694533762058,
"grad_norm": 106.12057495117188,
"learning_rate": 7.416666666666668e-07,
"loss": 0.3132,
"num_input_tokens_seen": 1202560,
"step": 89
},
{
"epoch": 1.157556270096463,
"grad_norm": 84.74403381347656,
"learning_rate": 7.5e-07,
"loss": 0.284,
"num_input_tokens_seen": 1216832,
"step": 90
},
{
"epoch": 1.1704180064308682,
"grad_norm": 10.08238410949707,
"learning_rate": 7.583333333333334e-07,
"loss": 0.1933,
"num_input_tokens_seen": 1230528,
"step": 91
},
{
"epoch": 1.1832797427652733,
"grad_norm": 56.27880859375,
"learning_rate": 7.666666666666667e-07,
"loss": 0.2154,
"num_input_tokens_seen": 1244352,
"step": 92
},
{
"epoch": 1.1961414790996785,
"grad_norm": 39.58720779418945,
"learning_rate": 7.750000000000001e-07,
"loss": 0.2064,
"num_input_tokens_seen": 1257472,
"step": 93
},
{
"epoch": 1.2090032154340835,
"grad_norm": 12.087499618530273,
"learning_rate": 7.833333333333335e-07,
"loss": 0.2038,
"num_input_tokens_seen": 1271392,
"step": 94
},
{
"epoch": 1.2218649517684887,
"grad_norm": 29.51178550720215,
"learning_rate": 7.916666666666667e-07,
"loss": 0.2152,
"num_input_tokens_seen": 1286432,
"step": 95
},
{
"epoch": 1.234726688102894,
"grad_norm": 12.675917625427246,
"learning_rate": 8.000000000000001e-07,
"loss": 0.1961,
"num_input_tokens_seen": 1300096,
"step": 96
},
{
"epoch": 1.247588424437299,
"grad_norm": 11.436767578125,
"learning_rate": 8.083333333333334e-07,
"loss": 0.1772,
"num_input_tokens_seen": 1313568,
"step": 97
},
{
"epoch": 1.2604501607717042,
"grad_norm": 25.415918350219727,
"learning_rate": 8.166666666666668e-07,
"loss": 0.1846,
"num_input_tokens_seen": 1327328,
"step": 98
},
{
"epoch": 1.2733118971061093,
"grad_norm": 11.161438941955566,
"learning_rate": 8.250000000000001e-07,
"loss": 0.1823,
"num_input_tokens_seen": 1340960,
"step": 99
},
{
"epoch": 1.2861736334405145,
"grad_norm": 7.966615200042725,
"learning_rate": 8.333333333333333e-07,
"loss": 0.1794,
"num_input_tokens_seen": 1353440,
"step": 100
},
{
"epoch": 1.2990353697749195,
"grad_norm": 32.296085357666016,
"learning_rate": 8.416666666666667e-07,
"loss": 0.2106,
"num_input_tokens_seen": 1367680,
"step": 101
},
{
"epoch": 1.3118971061093248,
"grad_norm": 26.521038055419922,
"learning_rate": 8.500000000000001e-07,
"loss": 0.2123,
"num_input_tokens_seen": 1380864,
"step": 102
},
{
"epoch": 1.32475884244373,
"grad_norm": 63.48737716674805,
"learning_rate": 8.583333333333334e-07,
"loss": 0.2413,
"num_input_tokens_seen": 1393888,
"step": 103
},
{
"epoch": 1.337620578778135,
"grad_norm": 55.781776428222656,
"learning_rate": 8.666666666666668e-07,
"loss": 0.2334,
"num_input_tokens_seen": 1406912,
"step": 104
},
{
"epoch": 1.3504823151125402,
"grad_norm": 13.036691665649414,
"learning_rate": 8.75e-07,
"loss": 0.2069,
"num_input_tokens_seen": 1420000,
"step": 105
},
{
"epoch": 1.3633440514469453,
"grad_norm": 45.98344421386719,
"learning_rate": 8.833333333333334e-07,
"loss": 0.2262,
"num_input_tokens_seen": 1433440,
"step": 106
},
{
"epoch": 1.3762057877813505,
"grad_norm": 34.49677276611328,
"learning_rate": 8.916666666666668e-07,
"loss": 0.1718,
"num_input_tokens_seen": 1446560,
"step": 107
},
{
"epoch": 1.3890675241157555,
"grad_norm": 21.14679718017578,
"learning_rate": 9.000000000000001e-07,
"loss": 0.204,
"num_input_tokens_seen": 1460320,
"step": 108
},
{
"epoch": 1.4019292604501608,
"grad_norm": 26.53287124633789,
"learning_rate": 9.083333333333335e-07,
"loss": 0.1849,
"num_input_tokens_seen": 1474272,
"step": 109
},
{
"epoch": 1.414790996784566,
"grad_norm": 34.011962890625,
"learning_rate": 9.166666666666666e-07,
"loss": 0.2028,
"num_input_tokens_seen": 1487904,
"step": 110
},
{
"epoch": 1.427652733118971,
"grad_norm": 17.4981689453125,
"learning_rate": 9.25e-07,
"loss": 0.179,
"num_input_tokens_seen": 1501664,
"step": 111
},
{
"epoch": 1.4405144694533762,
"grad_norm": 20.183582305908203,
"learning_rate": 9.333333333333334e-07,
"loss": 0.1813,
"num_input_tokens_seen": 1515264,
"step": 112
},
{
"epoch": 1.4533762057877815,
"grad_norm": 26.655994415283203,
"learning_rate": 9.416666666666667e-07,
"loss": 0.1955,
"num_input_tokens_seen": 1528640,
"step": 113
},
{
"epoch": 1.4662379421221865,
"grad_norm": 25.745370864868164,
"learning_rate": 9.500000000000001e-07,
"loss": 0.1577,
"num_input_tokens_seen": 1541632,
"step": 114
},
{
"epoch": 1.4790996784565915,
"grad_norm": 22.924352645874023,
"learning_rate": 9.583333333333334e-07,
"loss": 0.1509,
"num_input_tokens_seen": 1554592,
"step": 115
},
{
"epoch": 1.4919614147909968,
"grad_norm": 26.520856857299805,
"learning_rate": 9.666666666666668e-07,
"loss": 0.2052,
"num_input_tokens_seen": 1567296,
"step": 116
},
{
"epoch": 1.504823151125402,
"grad_norm": 23.989120483398438,
"learning_rate": 9.750000000000002e-07,
"loss": 0.1576,
"num_input_tokens_seen": 1580800,
"step": 117
},
{
"epoch": 1.517684887459807,
"grad_norm": 11.067609786987305,
"learning_rate": 9.833333333333334e-07,
"loss": 0.1459,
"num_input_tokens_seen": 1593792,
"step": 118
},
{
"epoch": 1.5305466237942122,
"grad_norm": 41.14239501953125,
"learning_rate": 9.916666666666668e-07,
"loss": 0.2694,
"num_input_tokens_seen": 1607648,
"step": 119
},
{
"epoch": 1.5434083601286175,
"grad_norm": 17.72841453552246,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.1891,
"num_input_tokens_seen": 1621184,
"step": 120
},
{
"epoch": 1.5562700964630225,
"grad_norm": 13.838713645935059,
"learning_rate": 1.0083333333333333e-06,
"loss": 0.1655,
"num_input_tokens_seen": 1634240,
"step": 121
},
{
"epoch": 1.5691318327974275,
"grad_norm": 17.73114013671875,
"learning_rate": 1.0166666666666667e-06,
"loss": 0.1534,
"num_input_tokens_seen": 1647712,
"step": 122
},
{
"epoch": 1.5819935691318328,
"grad_norm": 7.044174671173096,
"learning_rate": 1.025e-06,
"loss": 0.1373,
"num_input_tokens_seen": 1661344,
"step": 123
},
{
"epoch": 1.594855305466238,
"grad_norm": 13.696454048156738,
"learning_rate": 1.0333333333333333e-06,
"loss": 0.1528,
"num_input_tokens_seen": 1674880,
"step": 124
},
{
"epoch": 1.607717041800643,
"grad_norm": 11.191144943237305,
"learning_rate": 1.0416666666666667e-06,
"loss": 0.2017,
"num_input_tokens_seen": 1688992,
"step": 125
},
{
"epoch": 1.6205787781350482,
"grad_norm": 10.479023933410645,
"learning_rate": 1.0500000000000001e-06,
"loss": 0.1554,
"num_input_tokens_seen": 1702944,
"step": 126
},
{
"epoch": 1.6334405144694535,
"grad_norm": 12.889703750610352,
"learning_rate": 1.0583333333333335e-06,
"loss": 0.1332,
"num_input_tokens_seen": 1716768,
"step": 127
},
{
"epoch": 1.6463022508038585,
"grad_norm": 8.173032760620117,
"learning_rate": 1.066666666666667e-06,
"loss": 0.115,
"num_input_tokens_seen": 1730848,
"step": 128
},
{
"epoch": 1.6591639871382635,
"grad_norm": 14.694462776184082,
"learning_rate": 1.075e-06,
"loss": 0.119,
"num_input_tokens_seen": 1743520,
"step": 129
},
{
"epoch": 1.6720257234726688,
"grad_norm": 8.472253799438477,
"learning_rate": 1.0833333333333335e-06,
"loss": 0.1164,
"num_input_tokens_seen": 1757696,
"step": 130
},
{
"epoch": 1.684887459807074,
"grad_norm": 20.757648468017578,
"learning_rate": 1.0916666666666667e-06,
"loss": 0.1981,
"num_input_tokens_seen": 1771264,
"step": 131
},
{
"epoch": 1.697749196141479,
"grad_norm": 20.149930953979492,
"learning_rate": 1.1e-06,
"loss": 0.168,
"num_input_tokens_seen": 1784480,
"step": 132
},
{
"epoch": 1.7106109324758842,
"grad_norm": 9.992751121520996,
"learning_rate": 1.1083333333333335e-06,
"loss": 0.0741,
"num_input_tokens_seen": 1798176,
"step": 133
},
{
"epoch": 1.7234726688102895,
"grad_norm": 26.80909538269043,
"learning_rate": 1.1166666666666666e-06,
"loss": 0.1847,
"num_input_tokens_seen": 1811264,
"step": 134
},
{
"epoch": 1.7363344051446945,
"grad_norm": 16.494613647460938,
"learning_rate": 1.125e-06,
"loss": 0.108,
"num_input_tokens_seen": 1825024,
"step": 135
},
{
"epoch": 1.7491961414790995,
"grad_norm": 18.742122650146484,
"learning_rate": 1.1333333333333334e-06,
"loss": 0.1214,
"num_input_tokens_seen": 1838624,
"step": 136
},
{
"epoch": 1.762057877813505,
"grad_norm": 10.102185249328613,
"learning_rate": 1.1416666666666668e-06,
"loss": 0.1252,
"num_input_tokens_seen": 1852288,
"step": 137
},
{
"epoch": 1.77491961414791,
"grad_norm": 23.612730026245117,
"learning_rate": 1.1500000000000002e-06,
"loss": 0.144,
"num_input_tokens_seen": 1865376,
"step": 138
},
{
"epoch": 1.787781350482315,
"grad_norm": 11.11936092376709,
"learning_rate": 1.1583333333333334e-06,
"loss": 0.1269,
"num_input_tokens_seen": 1878784,
"step": 139
},
{
"epoch": 1.8006430868167203,
"grad_norm": 7.341479778289795,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.1283,
"num_input_tokens_seen": 1892320,
"step": 140
},
{
"epoch": 1.8135048231511255,
"grad_norm": 16.097875595092773,
"learning_rate": 1.175e-06,
"loss": 0.0929,
"num_input_tokens_seen": 1905728,
"step": 141
},
{
"epoch": 1.8263665594855305,
"grad_norm": 18.07818603515625,
"learning_rate": 1.1833333333333334e-06,
"loss": 0.1349,
"num_input_tokens_seen": 1918752,
"step": 142
},
{
"epoch": 1.8392282958199357,
"grad_norm": 18.349048614501953,
"learning_rate": 1.1916666666666668e-06,
"loss": 0.1277,
"num_input_tokens_seen": 1931808,
"step": 143
},
{
"epoch": 1.852090032154341,
"grad_norm": 12.308938980102539,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.1585,
"num_input_tokens_seen": 1945312,
"step": 144
},
{
"epoch": 1.864951768488746,
"grad_norm": 19.95519256591797,
"learning_rate": 1.2083333333333333e-06,
"loss": 0.1468,
"num_input_tokens_seen": 1958016,
"step": 145
},
{
"epoch": 1.877813504823151,
"grad_norm": 11.419037818908691,
"learning_rate": 1.2166666666666667e-06,
"loss": 0.1049,
"num_input_tokens_seen": 1971392,
"step": 146
},
{
"epoch": 1.8906752411575563,
"grad_norm": 9.492958068847656,
"learning_rate": 1.2250000000000001e-06,
"loss": 0.1297,
"num_input_tokens_seen": 1984992,
"step": 147
},
{
"epoch": 1.9035369774919615,
"grad_norm": 6.134005546569824,
"learning_rate": 1.2333333333333335e-06,
"loss": 0.1111,
"num_input_tokens_seen": 1998144,
"step": 148
},
{
"epoch": 1.9163987138263665,
"grad_norm": 10.250520706176758,
"learning_rate": 1.2416666666666667e-06,
"loss": 0.1202,
"num_input_tokens_seen": 2012032,
"step": 149
},
{
"epoch": 1.9292604501607717,
"grad_norm": 10.018802642822266,
"learning_rate": 1.25e-06,
"loss": 0.0829,
"num_input_tokens_seen": 2025600,
"step": 150
},
{
"epoch": 1.942122186495177,
"grad_norm": 17.175769805908203,
"learning_rate": 1.2583333333333333e-06,
"loss": 0.1119,
"num_input_tokens_seen": 2040096,
"step": 151
},
{
"epoch": 1.954983922829582,
"grad_norm": 18.159189224243164,
"learning_rate": 1.2666666666666669e-06,
"loss": 0.1144,
"num_input_tokens_seen": 2053504,
"step": 152
},
{
"epoch": 1.967845659163987,
"grad_norm": 14.536725044250488,
"learning_rate": 1.275e-06,
"loss": 0.117,
"num_input_tokens_seen": 2065856,
"step": 153
},
{
"epoch": 1.9807073954983923,
"grad_norm": 10.24523639678955,
"learning_rate": 1.2833333333333335e-06,
"loss": 0.0998,
"num_input_tokens_seen": 2078656,
"step": 154
},
{
"epoch": 1.9935691318327975,
"grad_norm": 20.030460357666016,
"learning_rate": 1.2916666666666669e-06,
"loss": 0.1384,
"num_input_tokens_seen": 2092672,
"step": 155
},
{
"epoch": 2.0064308681672025,
"grad_norm": 12.654150009155273,
"learning_rate": 1.3e-06,
"loss": 0.1157,
"num_input_tokens_seen": 2105728,
"step": 156
},
{
"epoch": 2.0192926045016075,
"grad_norm": 5.51693058013916,
"learning_rate": 1.3083333333333334e-06,
"loss": 0.0696,
"num_input_tokens_seen": 2118976,
"step": 157
},
{
"epoch": 2.032154340836013,
"grad_norm": 9.432037353515625,
"learning_rate": 1.3166666666666666e-06,
"loss": 0.0665,
"num_input_tokens_seen": 2132352,
"step": 158
},
{
"epoch": 2.045016077170418,
"grad_norm": 15.199766159057617,
"learning_rate": 1.3250000000000002e-06,
"loss": 0.0783,
"num_input_tokens_seen": 2146048,
"step": 159
},
{
"epoch": 2.057877813504823,
"grad_norm": 10.740960121154785,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0749,
"num_input_tokens_seen": 2158752,
"step": 160
},
{
"epoch": 2.0707395498392285,
"grad_norm": 7.335105895996094,
"learning_rate": 1.3416666666666666e-06,
"loss": 0.0731,
"num_input_tokens_seen": 2171872,
"step": 161
},
{
"epoch": 2.0836012861736335,
"grad_norm": 14.225622177124023,
"learning_rate": 1.3500000000000002e-06,
"loss": 0.0913,
"num_input_tokens_seen": 2184800,
"step": 162
},
{
"epoch": 2.0964630225080385,
"grad_norm": 12.737798690795898,
"learning_rate": 1.3583333333333334e-06,
"loss": 0.0521,
"num_input_tokens_seen": 2198176,
"step": 163
},
{
"epoch": 2.1093247588424435,
"grad_norm": 10.609269142150879,
"learning_rate": 1.3666666666666668e-06,
"loss": 0.068,
"num_input_tokens_seen": 2211392,
"step": 164
},
{
"epoch": 2.122186495176849,
"grad_norm": 8.30199146270752,
"learning_rate": 1.3750000000000002e-06,
"loss": 0.0686,
"num_input_tokens_seen": 2224480,
"step": 165
},
{
"epoch": 2.135048231511254,
"grad_norm": 6.069125175476074,
"learning_rate": 1.3833333333333336e-06,
"loss": 0.0545,
"num_input_tokens_seen": 2237920,
"step": 166
},
{
"epoch": 2.147909967845659,
"grad_norm": 4.639097213745117,
"learning_rate": 1.3916666666666668e-06,
"loss": 0.0347,
"num_input_tokens_seen": 2251008,
"step": 167
},
{
"epoch": 2.1607717041800645,
"grad_norm": 9.093743324279785,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.0993,
"num_input_tokens_seen": 2264448,
"step": 168
},
{
"epoch": 2.1736334405144695,
"grad_norm": 8.8851900100708,
"learning_rate": 1.4083333333333335e-06,
"loss": 0.1059,
"num_input_tokens_seen": 2277568,
"step": 169
},
{
"epoch": 2.1864951768488745,
"grad_norm": 10.823246955871582,
"learning_rate": 1.4166666666666667e-06,
"loss": 0.089,
"num_input_tokens_seen": 2291168,
"step": 170
},
{
"epoch": 2.19935691318328,
"grad_norm": 5.454392910003662,
"learning_rate": 1.425e-06,
"loss": 0.0379,
"num_input_tokens_seen": 2304192,
"step": 171
},
{
"epoch": 2.212218649517685,
"grad_norm": 6.2786030769348145,
"learning_rate": 1.4333333333333335e-06,
"loss": 0.0626,
"num_input_tokens_seen": 2317920,
"step": 172
},
{
"epoch": 2.22508038585209,
"grad_norm": 8.764283180236816,
"learning_rate": 1.4416666666666667e-06,
"loss": 0.0957,
"num_input_tokens_seen": 2332416,
"step": 173
},
{
"epoch": 2.237942122186495,
"grad_norm": 10.533244132995605,
"learning_rate": 1.45e-06,
"loss": 0.0636,
"num_input_tokens_seen": 2346112,
"step": 174
},
{
"epoch": 2.2508038585209005,
"grad_norm": 7.398101329803467,
"learning_rate": 1.4583333333333335e-06,
"loss": 0.074,
"num_input_tokens_seen": 2359488,
"step": 175
},
{
"epoch": 2.2636655948553055,
"grad_norm": 6.4459686279296875,
"learning_rate": 1.4666666666666669e-06,
"loss": 0.0685,
"num_input_tokens_seen": 2372928,
"step": 176
},
{
"epoch": 2.2765273311897105,
"grad_norm": 5.918321132659912,
"learning_rate": 1.475e-06,
"loss": 0.0574,
"num_input_tokens_seen": 2386016,
"step": 177
},
{
"epoch": 2.289389067524116,
"grad_norm": 7.045689582824707,
"learning_rate": 1.4833333333333337e-06,
"loss": 0.0619,
"num_input_tokens_seen": 2399936,
"step": 178
},
{
"epoch": 2.302250803858521,
"grad_norm": 9.59676742553711,
"learning_rate": 1.4916666666666669e-06,
"loss": 0.0683,
"num_input_tokens_seen": 2412480,
"step": 179
},
{
"epoch": 2.315112540192926,
"grad_norm": 7.824158668518066,
"learning_rate": 1.5e-06,
"loss": 0.07,
"num_input_tokens_seen": 2425888,
"step": 180
},
{
"epoch": 2.327974276527331,
"grad_norm": 10.920543670654297,
"learning_rate": 1.5083333333333336e-06,
"loss": 0.1154,
"num_input_tokens_seen": 2439168,
"step": 181
},
{
"epoch": 2.3408360128617365,
"grad_norm": 7.014737606048584,
"learning_rate": 1.5166666666666668e-06,
"loss": 0.0923,
"num_input_tokens_seen": 2452928,
"step": 182
},
{
"epoch": 2.3536977491961415,
"grad_norm": 7.072991371154785,
"learning_rate": 1.525e-06,
"loss": 0.0777,
"num_input_tokens_seen": 2465760,
"step": 183
},
{
"epoch": 2.3665594855305465,
"grad_norm": 13.426029205322266,
"learning_rate": 1.5333333333333334e-06,
"loss": 0.0754,
"num_input_tokens_seen": 2479744,
"step": 184
},
{
"epoch": 2.379421221864952,
"grad_norm": 9.223447799682617,
"learning_rate": 1.5416666666666668e-06,
"loss": 0.0704,
"num_input_tokens_seen": 2493024,
"step": 185
},
{
"epoch": 2.392282958199357,
"grad_norm": 6.772041320800781,
"learning_rate": 1.5500000000000002e-06,
"loss": 0.0915,
"num_input_tokens_seen": 2506720,
"step": 186
},
{
"epoch": 2.405144694533762,
"grad_norm": 7.140571117401123,
"learning_rate": 1.5583333333333334e-06,
"loss": 0.087,
"num_input_tokens_seen": 2519776,
"step": 187
},
{
"epoch": 2.418006430868167,
"grad_norm": 9.75447940826416,
"learning_rate": 1.566666666666667e-06,
"loss": 0.0566,
"num_input_tokens_seen": 2533536,
"step": 188
},
{
"epoch": 2.4308681672025725,
"grad_norm": 12.131728172302246,
"learning_rate": 1.5750000000000002e-06,
"loss": 0.1037,
"num_input_tokens_seen": 2547520,
"step": 189
},
{
"epoch": 2.4437299035369775,
"grad_norm": 10.324275970458984,
"learning_rate": 1.5833333333333333e-06,
"loss": 0.1143,
"num_input_tokens_seen": 2561280,
"step": 190
},
{
"epoch": 2.4565916398713825,
"grad_norm": 15.137426376342773,
"learning_rate": 1.591666666666667e-06,
"loss": 0.0829,
"num_input_tokens_seen": 2575136,
"step": 191
},
{
"epoch": 2.469453376205788,
"grad_norm": 4.877906322479248,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.0422,
"num_input_tokens_seen": 2588736,
"step": 192
},
{
"epoch": 2.482315112540193,
"grad_norm": 6.23721170425415,
"learning_rate": 1.6083333333333333e-06,
"loss": 0.0727,
"num_input_tokens_seen": 2602400,
"step": 193
},
{
"epoch": 2.495176848874598,
"grad_norm": 10.653412818908691,
"learning_rate": 1.6166666666666667e-06,
"loss": 0.0836,
"num_input_tokens_seen": 2615648,
"step": 194
},
{
"epoch": 2.508038585209003,
"grad_norm": 8.391611099243164,
"learning_rate": 1.6250000000000001e-06,
"loss": 0.0803,
"num_input_tokens_seen": 2629024,
"step": 195
},
{
"epoch": 2.5209003215434085,
"grad_norm": 6.826237201690674,
"learning_rate": 1.6333333333333335e-06,
"loss": 0.0654,
"num_input_tokens_seen": 2642784,
"step": 196
},
{
"epoch": 2.5337620578778135,
"grad_norm": 7.795334339141846,
"learning_rate": 1.6416666666666667e-06,
"loss": 0.0587,
"num_input_tokens_seen": 2656064,
"step": 197
},
{
"epoch": 2.5466237942122185,
"grad_norm": 9.136542320251465,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.0848,
"num_input_tokens_seen": 2669472,
"step": 198
},
{
"epoch": 2.559485530546624,
"grad_norm": 4.476882457733154,
"learning_rate": 1.6583333333333335e-06,
"loss": 0.0525,
"num_input_tokens_seen": 2682944,
"step": 199
},
{
"epoch": 2.572347266881029,
"grad_norm": 8.298385620117188,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.0677,
"num_input_tokens_seen": 2695968,
"step": 200
},
{
"epoch": 2.585209003215434,
"grad_norm": 9.084216117858887,
"learning_rate": 1.6750000000000003e-06,
"loss": 0.062,
"num_input_tokens_seen": 2709504,
"step": 201
},
{
"epoch": 2.598070739549839,
"grad_norm": 10.124899864196777,
"learning_rate": 1.6833333333333335e-06,
"loss": 0.0674,
"num_input_tokens_seen": 2723264,
"step": 202
},
{
"epoch": 2.6109324758842445,
"grad_norm": 5.721729755401611,
"learning_rate": 1.6916666666666666e-06,
"loss": 0.0533,
"num_input_tokens_seen": 2736768,
"step": 203
},
{
"epoch": 2.6237942122186495,
"grad_norm": 6.966607570648193,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.0757,
"num_input_tokens_seen": 2750816,
"step": 204
},
{
"epoch": 2.6366559485530545,
"grad_norm": 12.730990409851074,
"learning_rate": 1.7083333333333334e-06,
"loss": 0.0777,
"num_input_tokens_seen": 2764576,
"step": 205
},
{
"epoch": 2.64951768488746,
"grad_norm": 13.19457721710205,
"learning_rate": 1.7166666666666668e-06,
"loss": 0.0921,
"num_input_tokens_seen": 2778080,
"step": 206
},
{
"epoch": 2.662379421221865,
"grad_norm": 6.583987712860107,
"learning_rate": 1.725e-06,
"loss": 0.0378,
"num_input_tokens_seen": 2790976,
"step": 207
},
{
"epoch": 2.67524115755627,
"grad_norm": 6.989199161529541,
"learning_rate": 1.7333333333333336e-06,
"loss": 0.0671,
"num_input_tokens_seen": 2805152,
"step": 208
},
{
"epoch": 2.688102893890675,
"grad_norm": 8.903035163879395,
"learning_rate": 1.7416666666666668e-06,
"loss": 0.0664,
"num_input_tokens_seen": 2818688,
"step": 209
},
{
"epoch": 2.7009646302250805,
"grad_norm": 10.817940711975098,
"learning_rate": 1.75e-06,
"loss": 0.072,
"num_input_tokens_seen": 2831872,
"step": 210
},
{
"epoch": 2.7138263665594855,
"grad_norm": 9.068681716918945,
"learning_rate": 1.7583333333333336e-06,
"loss": 0.0883,
"num_input_tokens_seen": 2845984,
"step": 211
},
{
"epoch": 2.7266881028938905,
"grad_norm": 4.464389801025391,
"learning_rate": 1.7666666666666668e-06,
"loss": 0.0414,
"num_input_tokens_seen": 2859232,
"step": 212
},
{
"epoch": 2.739549839228296,
"grad_norm": 4.999591827392578,
"learning_rate": 1.7750000000000002e-06,
"loss": 0.031,
"num_input_tokens_seen": 2872192,
"step": 213
},
{
"epoch": 2.752411575562701,
"grad_norm": 6.336106300354004,
"learning_rate": 1.7833333333333336e-06,
"loss": 0.0634,
"num_input_tokens_seen": 2885312,
"step": 214
},
{
"epoch": 2.765273311897106,
"grad_norm": 7.798391819000244,
"learning_rate": 1.7916666666666667e-06,
"loss": 0.0837,
"num_input_tokens_seen": 2899072,
"step": 215
},
{
"epoch": 2.778135048231511,
"grad_norm": 8.36534595489502,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.0855,
"num_input_tokens_seen": 2912448,
"step": 216
},
{
"epoch": 2.7909967845659165,
"grad_norm": 9.043126106262207,
"learning_rate": 1.8083333333333335e-06,
"loss": 0.0945,
"num_input_tokens_seen": 2925120,
"step": 217
},
{
"epoch": 2.8038585209003215,
"grad_norm": 5.886596202850342,
"learning_rate": 1.816666666666667e-06,
"loss": 0.078,
"num_input_tokens_seen": 2938336,
"step": 218
},
{
"epoch": 2.816720257234727,
"grad_norm": 6.200281620025635,
"learning_rate": 1.825e-06,
"loss": 0.0573,
"num_input_tokens_seen": 2951264,
"step": 219
},
{
"epoch": 2.829581993569132,
"grad_norm": 10.755176544189453,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.0806,
"num_input_tokens_seen": 2965120,
"step": 220
},
{
"epoch": 2.842443729903537,
"grad_norm": 7.736689567565918,
"learning_rate": 1.8416666666666669e-06,
"loss": 0.0961,
"num_input_tokens_seen": 2978368,
"step": 221
},
{
"epoch": 2.855305466237942,
"grad_norm": 8.25919246673584,
"learning_rate": 1.85e-06,
"loss": 0.0732,
"num_input_tokens_seen": 2992000,
"step": 222
},
{
"epoch": 2.868167202572347,
"grad_norm": 9.59769344329834,
"learning_rate": 1.8583333333333335e-06,
"loss": 0.0957,
"num_input_tokens_seen": 3006048,
"step": 223
},
{
"epoch": 2.8810289389067525,
"grad_norm": 7.2752485275268555,
"learning_rate": 1.8666666666666669e-06,
"loss": 0.0774,
"num_input_tokens_seen": 3019744,
"step": 224
},
{
"epoch": 2.8938906752411575,
"grad_norm": 5.989022731781006,
"learning_rate": 1.8750000000000003e-06,
"loss": 0.0691,
"num_input_tokens_seen": 3033568,
"step": 225
},
{
"epoch": 2.906752411575563,
"grad_norm": 6.692146301269531,
"learning_rate": 1.8833333333333334e-06,
"loss": 0.0529,
"num_input_tokens_seen": 3047712,
"step": 226
},
{
"epoch": 2.919614147909968,
"grad_norm": 7.606103897094727,
"learning_rate": 1.8916666666666668e-06,
"loss": 0.0811,
"num_input_tokens_seen": 3060800,
"step": 227
},
{
"epoch": 2.932475884244373,
"grad_norm": 8.168295860290527,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.1211,
"num_input_tokens_seen": 3074720,
"step": 228
},
{
"epoch": 2.945337620578778,
"grad_norm": 5.379516124725342,
"learning_rate": 1.9083333333333334e-06,
"loss": 0.0489,
"num_input_tokens_seen": 3088448,
"step": 229
},
{
"epoch": 2.958199356913183,
"grad_norm": 11.40697956085205,
"learning_rate": 1.916666666666667e-06,
"loss": 0.0947,
"num_input_tokens_seen": 3103008,
"step": 230
},
{
"epoch": 2.9710610932475885,
"grad_norm": 6.254599571228027,
"learning_rate": 1.925e-06,
"loss": 0.0561,
"num_input_tokens_seen": 3116320,
"step": 231
},
{
"epoch": 2.9839228295819935,
"grad_norm": 6.7563300132751465,
"learning_rate": 1.9333333333333336e-06,
"loss": 0.0629,
"num_input_tokens_seen": 3130592,
"step": 232
},
{
"epoch": 2.996784565916399,
"grad_norm": 3.745004177093506,
"learning_rate": 1.9416666666666666e-06,
"loss": 0.0579,
"num_input_tokens_seen": 3144000,
"step": 233
},
{
"epoch": 3.009646302250804,
"grad_norm": 3.5915324687957764,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.0285,
"num_input_tokens_seen": 3157472,
"step": 234
},
{
"epoch": 3.022508038585209,
"grad_norm": 2.564159631729126,
"learning_rate": 1.9583333333333334e-06,
"loss": 0.0256,
"num_input_tokens_seen": 3170656,
"step": 235
},
{
"epoch": 3.035369774919614,
"grad_norm": 5.488891124725342,
"learning_rate": 1.9666666666666668e-06,
"loss": 0.0247,
"num_input_tokens_seen": 3184320,
"step": 236
},
{
"epoch": 3.0482315112540195,
"grad_norm": 4.964622497558594,
"learning_rate": 1.975e-06,
"loss": 0.0325,
"num_input_tokens_seen": 3197216,
"step": 237
},
{
"epoch": 3.0610932475884245,
"grad_norm": 5.399533748626709,
"learning_rate": 1.9833333333333335e-06,
"loss": 0.0172,
"num_input_tokens_seen": 3211008,
"step": 238
},
{
"epoch": 3.0739549839228295,
"grad_norm": 5.841439247131348,
"learning_rate": 1.991666666666667e-06,
"loss": 0.05,
"num_input_tokens_seen": 3225152,
"step": 239
},
{
"epoch": 3.0868167202572345,
"grad_norm": 3.352426052093506,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0134,
"num_input_tokens_seen": 3238752,
"step": 240
},
{
"epoch": 3.09967845659164,
"grad_norm": 8.727036476135254,
"learning_rate": 2.0083333333333337e-06,
"loss": 0.0434,
"num_input_tokens_seen": 3252224,
"step": 241
},
{
"epoch": 3.112540192926045,
"grad_norm": 6.6712727546691895,
"learning_rate": 2.0166666666666667e-06,
"loss": 0.0186,
"num_input_tokens_seen": 3264832,
"step": 242
},
{
"epoch": 3.12540192926045,
"grad_norm": 5.2496490478515625,
"learning_rate": 2.025e-06,
"loss": 0.0341,
"num_input_tokens_seen": 3278048,
"step": 243
},
{
"epoch": 3.1382636655948555,
"grad_norm": 6.601498126983643,
"learning_rate": 2.0333333333333335e-06,
"loss": 0.0386,
"num_input_tokens_seen": 3291680,
"step": 244
},
{
"epoch": 3.1511254019292605,
"grad_norm": 5.705418109893799,
"learning_rate": 2.041666666666667e-06,
"loss": 0.0389,
"num_input_tokens_seen": 3305440,
"step": 245
},
{
"epoch": 3.1639871382636655,
"grad_norm": 4.315848350524902,
"learning_rate": 2.05e-06,
"loss": 0.0227,
"num_input_tokens_seen": 3319360,
"step": 246
},
{
"epoch": 3.176848874598071,
"grad_norm": 3.5968470573425293,
"learning_rate": 2.0583333333333337e-06,
"loss": 0.0317,
"num_input_tokens_seen": 3333088,
"step": 247
},
{
"epoch": 3.189710610932476,
"grad_norm": 3.23504638671875,
"learning_rate": 2.0666666666666666e-06,
"loss": 0.0335,
"num_input_tokens_seen": 3345856,
"step": 248
},
{
"epoch": 3.202572347266881,
"grad_norm": 3.6574056148529053,
"learning_rate": 2.075e-06,
"loss": 0.0257,
"num_input_tokens_seen": 3358880,
"step": 249
},
{
"epoch": 3.215434083601286,
"grad_norm": 2.903745651245117,
"learning_rate": 2.0833333333333334e-06,
"loss": 0.0244,
"num_input_tokens_seen": 3373312,
"step": 250
},
{
"epoch": 3.2282958199356915,
"grad_norm": 5.933866024017334,
"learning_rate": 2.091666666666667e-06,
"loss": 0.0285,
"num_input_tokens_seen": 3386336,
"step": 251
},
{
"epoch": 3.2411575562700965,
"grad_norm": 2.2281062602996826,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.0093,
"num_input_tokens_seen": 3399904,
"step": 252
},
{
"epoch": 3.2540192926045015,
"grad_norm": 5.6479573249816895,
"learning_rate": 2.1083333333333336e-06,
"loss": 0.0415,
"num_input_tokens_seen": 3412896,
"step": 253
},
{
"epoch": 3.266881028938907,
"grad_norm": 3.5148093700408936,
"learning_rate": 2.116666666666667e-06,
"loss": 0.0239,
"num_input_tokens_seen": 3426336,
"step": 254
},
{
"epoch": 3.279742765273312,
"grad_norm": 8.129992485046387,
"learning_rate": 2.125e-06,
"loss": 0.0412,
"num_input_tokens_seen": 3439680,
"step": 255
},
{
"epoch": 3.292604501607717,
"grad_norm": 10.018027305603027,
"learning_rate": 2.133333333333334e-06,
"loss": 0.0503,
"num_input_tokens_seen": 3453760,
"step": 256
},
{
"epoch": 3.305466237942122,
"grad_norm": 1.7141941785812378,
"learning_rate": 2.1416666666666668e-06,
"loss": 0.0046,
"num_input_tokens_seen": 3466496,
"step": 257
},
{
"epoch": 3.3183279742765275,
"grad_norm": 4.564700126647949,
"learning_rate": 2.15e-06,
"loss": 0.041,
"num_input_tokens_seen": 3480352,
"step": 258
},
{
"epoch": 3.3311897106109325,
"grad_norm": 5.49994421005249,
"learning_rate": 2.1583333333333336e-06,
"loss": 0.0257,
"num_input_tokens_seen": 3494112,
"step": 259
},
{
"epoch": 3.3440514469453375,
"grad_norm": 4.59380578994751,
"learning_rate": 2.166666666666667e-06,
"loss": 0.0168,
"num_input_tokens_seen": 3507520,
"step": 260
},
{
"epoch": 3.356913183279743,
"grad_norm": 5.633969306945801,
"learning_rate": 2.1750000000000004e-06,
"loss": 0.0439,
"num_input_tokens_seen": 3520352,
"step": 261
},
{
"epoch": 3.369774919614148,
"grad_norm": 7.95892333984375,
"learning_rate": 2.1833333333333333e-06,
"loss": 0.0204,
"num_input_tokens_seen": 3533984,
"step": 262
},
{
"epoch": 3.382636655948553,
"grad_norm": 4.728748798370361,
"learning_rate": 2.191666666666667e-06,
"loss": 0.0284,
"num_input_tokens_seen": 3547072,
"step": 263
},
{
"epoch": 3.395498392282958,
"grad_norm": 6.725067615509033,
"learning_rate": 2.2e-06,
"loss": 0.0684,
"num_input_tokens_seen": 3559872,
"step": 264
},
{
"epoch": 3.4083601286173635,
"grad_norm": 10.331790924072266,
"learning_rate": 2.2083333333333335e-06,
"loss": 0.0479,
"num_input_tokens_seen": 3572832,
"step": 265
},
{
"epoch": 3.4212218649517685,
"grad_norm": 5.8449296951293945,
"learning_rate": 2.216666666666667e-06,
"loss": 0.0434,
"num_input_tokens_seen": 3585792,
"step": 266
},
{
"epoch": 3.4340836012861735,
"grad_norm": 4.554612636566162,
"learning_rate": 2.2250000000000003e-06,
"loss": 0.0213,
"num_input_tokens_seen": 3599392,
"step": 267
},
{
"epoch": 3.446945337620579,
"grad_norm": 7.6923828125,
"learning_rate": 2.2333333333333333e-06,
"loss": 0.0415,
"num_input_tokens_seen": 3613088,
"step": 268
},
{
"epoch": 3.459807073954984,
"grad_norm": 5.714204788208008,
"learning_rate": 2.2416666666666667e-06,
"loss": 0.0404,
"num_input_tokens_seen": 3626304,
"step": 269
},
{
"epoch": 3.472668810289389,
"grad_norm": 5.449560165405273,
"learning_rate": 2.25e-06,
"loss": 0.0566,
"num_input_tokens_seen": 3639968,
"step": 270
},
{
"epoch": 3.485530546623794,
"grad_norm": 4.743899345397949,
"learning_rate": 2.2583333333333335e-06,
"loss": 0.0509,
"num_input_tokens_seen": 3653472,
"step": 271
},
{
"epoch": 3.4983922829581995,
"grad_norm": 4.767292022705078,
"learning_rate": 2.266666666666667e-06,
"loss": 0.0385,
"num_input_tokens_seen": 3667104,
"step": 272
},
{
"epoch": 3.5112540192926045,
"grad_norm": 2.342560291290283,
"learning_rate": 2.2750000000000002e-06,
"loss": 0.0225,
"num_input_tokens_seen": 3680480,
"step": 273
},
{
"epoch": 3.5241157556270095,
"grad_norm": 3.9172496795654297,
"learning_rate": 2.2833333333333336e-06,
"loss": 0.0255,
"num_input_tokens_seen": 3693568,
"step": 274
},
{
"epoch": 3.536977491961415,
"grad_norm": 6.793405532836914,
"learning_rate": 2.2916666666666666e-06,
"loss": 0.0531,
"num_input_tokens_seen": 3706496,
"step": 275
},
{
"epoch": 3.54983922829582,
"grad_norm": 2.142134189605713,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.0095,
"num_input_tokens_seen": 3720352,
"step": 276
},
{
"epoch": 3.562700964630225,
"grad_norm": 6.340362071990967,
"learning_rate": 2.3083333333333334e-06,
"loss": 0.0229,
"num_input_tokens_seen": 3734176,
"step": 277
},
{
"epoch": 3.57556270096463,
"grad_norm": 5.204097747802734,
"learning_rate": 2.316666666666667e-06,
"loss": 0.038,
"num_input_tokens_seen": 3748544,
"step": 278
},
{
"epoch": 3.5884244372990355,
"grad_norm": 6.532016754150391,
"learning_rate": 2.325e-06,
"loss": 0.0316,
"num_input_tokens_seen": 3761984,
"step": 279
},
{
"epoch": 3.6012861736334405,
"grad_norm": 10.49596881866455,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.0861,
"num_input_tokens_seen": 3774304,
"step": 280
},
{
"epoch": 3.6141479099678455,
"grad_norm": 5.68178129196167,
"learning_rate": 2.341666666666667e-06,
"loss": 0.0566,
"num_input_tokens_seen": 3788416,
"step": 281
},
{
"epoch": 3.627009646302251,
"grad_norm": 6.790673732757568,
"learning_rate": 2.35e-06,
"loss": 0.0804,
"num_input_tokens_seen": 3802112,
"step": 282
},
{
"epoch": 3.639871382636656,
"grad_norm": 7.294980049133301,
"learning_rate": 2.3583333333333338e-06,
"loss": 0.046,
"num_input_tokens_seen": 3815968,
"step": 283
},
{
"epoch": 3.652733118971061,
"grad_norm": 10.37634563446045,
"learning_rate": 2.3666666666666667e-06,
"loss": 0.0693,
"num_input_tokens_seen": 3829248,
"step": 284
},
{
"epoch": 3.665594855305466,
"grad_norm": 4.692931652069092,
"learning_rate": 2.375e-06,
"loss": 0.0342,
"num_input_tokens_seen": 3843072,
"step": 285
},
{
"epoch": 3.6784565916398715,
"grad_norm": 3.5910987854003906,
"learning_rate": 2.3833333333333335e-06,
"loss": 0.0479,
"num_input_tokens_seen": 3856288,
"step": 286
},
{
"epoch": 3.6913183279742765,
"grad_norm": 5.858832359313965,
"learning_rate": 2.391666666666667e-06,
"loss": 0.0388,
"num_input_tokens_seen": 3870240,
"step": 287
},
{
"epoch": 3.7041800643086815,
"grad_norm": 4.469853401184082,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.0274,
"num_input_tokens_seen": 3884096,
"step": 288
},
{
"epoch": 3.717041800643087,
"grad_norm": 4.967989921569824,
"learning_rate": 2.4083333333333337e-06,
"loss": 0.0259,
"num_input_tokens_seen": 3898368,
"step": 289
},
{
"epoch": 3.729903536977492,
"grad_norm": 6.155540943145752,
"learning_rate": 2.4166666666666667e-06,
"loss": 0.0367,
"num_input_tokens_seen": 3911200,
"step": 290
},
{
"epoch": 3.742765273311897,
"grad_norm": 6.5608367919921875,
"learning_rate": 2.425e-06,
"loss": 0.0661,
"num_input_tokens_seen": 3924320,
"step": 291
},
{
"epoch": 3.755627009646302,
"grad_norm": 6.416184902191162,
"learning_rate": 2.4333333333333335e-06,
"loss": 0.0466,
"num_input_tokens_seen": 3937632,
"step": 292
},
{
"epoch": 3.7684887459807075,
"grad_norm": 5.672483444213867,
"learning_rate": 2.441666666666667e-06,
"loss": 0.0286,
"num_input_tokens_seen": 3951040,
"step": 293
},
{
"epoch": 3.7813504823151125,
"grad_norm": 9.215237617492676,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.0586,
"num_input_tokens_seen": 3963936,
"step": 294
},
{
"epoch": 3.7942122186495175,
"grad_norm": 5.630943298339844,
"learning_rate": 2.4583333333333332e-06,
"loss": 0.0329,
"num_input_tokens_seen": 3976832,
"step": 295
},
{
"epoch": 3.807073954983923,
"grad_norm": 8.257805824279785,
"learning_rate": 2.466666666666667e-06,
"loss": 0.0582,
"num_input_tokens_seen": 3990592,
"step": 296
},
{
"epoch": 3.819935691318328,
"grad_norm": 3.351003408432007,
"learning_rate": 2.475e-06,
"loss": 0.0312,
"num_input_tokens_seen": 4004288,
"step": 297
},
{
"epoch": 3.832797427652733,
"grad_norm": 3.2983040809631348,
"learning_rate": 2.4833333333333334e-06,
"loss": 0.0329,
"num_input_tokens_seen": 4017856,
"step": 298
},
{
"epoch": 3.845659163987138,
"grad_norm": 4.082300662994385,
"learning_rate": 2.491666666666667e-06,
"loss": 0.0206,
"num_input_tokens_seen": 4031168,
"step": 299
},
{
"epoch": 3.8585209003215435,
"grad_norm": 3.222336530685425,
"learning_rate": 2.5e-06,
"loss": 0.0426,
"num_input_tokens_seen": 4044416,
"step": 300
},
{
"epoch": 3.8713826366559485,
"grad_norm": 3.699768304824829,
"learning_rate": 2.5083333333333336e-06,
"loss": 0.0179,
"num_input_tokens_seen": 4057824,
"step": 301
},
{
"epoch": 3.884244372990354,
"grad_norm": 2.819528341293335,
"learning_rate": 2.5166666666666666e-06,
"loss": 0.0289,
"num_input_tokens_seen": 4071936,
"step": 302
},
{
"epoch": 3.897106109324759,
"grad_norm": 4.0458478927612305,
"learning_rate": 2.5250000000000004e-06,
"loss": 0.0303,
"num_input_tokens_seen": 4085536,
"step": 303
},
{
"epoch": 3.909967845659164,
"grad_norm": 11.214617729187012,
"learning_rate": 2.5333333333333338e-06,
"loss": 0.046,
"num_input_tokens_seen": 4099552,
"step": 304
},
{
"epoch": 3.922829581993569,
"grad_norm": 6.758171081542969,
"learning_rate": 2.5416666666666668e-06,
"loss": 0.0523,
"num_input_tokens_seen": 4112736,
"step": 305
},
{
"epoch": 3.935691318327974,
"grad_norm": 5.622665882110596,
"learning_rate": 2.55e-06,
"loss": 0.0329,
"num_input_tokens_seen": 4126080,
"step": 306
},
{
"epoch": 3.9485530546623795,
"grad_norm": 1.4951086044311523,
"learning_rate": 2.558333333333334e-06,
"loss": 0.0072,
"num_input_tokens_seen": 4138912,
"step": 307
},
{
"epoch": 3.9614147909967845,
"grad_norm": 5.639418601989746,
"learning_rate": 2.566666666666667e-06,
"loss": 0.0415,
"num_input_tokens_seen": 4152768,
"step": 308
},
{
"epoch": 3.97427652733119,
"grad_norm": 7.496403694152832,
"learning_rate": 2.5750000000000003e-06,
"loss": 0.0233,
"num_input_tokens_seen": 4166528,
"step": 309
},
{
"epoch": 3.987138263665595,
"grad_norm": 8.74985408782959,
"learning_rate": 2.5833333333333337e-06,
"loss": 0.0423,
"num_input_tokens_seen": 4180832,
"step": 310
},
{
"epoch": 4.0,
"grad_norm": 6.229910850524902,
"learning_rate": 2.5916666666666667e-06,
"loss": 0.0295,
"num_input_tokens_seen": 4194048,
"step": 311
},
{
"epoch": 4.012861736334405,
"grad_norm": 3.404331922531128,
"learning_rate": 2.6e-06,
"loss": 0.0327,
"num_input_tokens_seen": 4207040,
"step": 312
},
{
"epoch": 4.02572347266881,
"grad_norm": 4.214801788330078,
"learning_rate": 2.608333333333333e-06,
"loss": 0.0301,
"num_input_tokens_seen": 4220000,
"step": 313
},
{
"epoch": 4.038585209003215,
"grad_norm": 6.241875171661377,
"learning_rate": 2.616666666666667e-06,
"loss": 0.0301,
"num_input_tokens_seen": 4233344,
"step": 314
},
{
"epoch": 4.051446945337621,
"grad_norm": 3.2021894454956055,
"learning_rate": 2.6250000000000003e-06,
"loss": 0.0281,
"num_input_tokens_seen": 4246144,
"step": 315
},
{
"epoch": 4.064308681672026,
"grad_norm": 3.5197951793670654,
"learning_rate": 2.6333333333333332e-06,
"loss": 0.0136,
"num_input_tokens_seen": 4260992,
"step": 316
},
{
"epoch": 4.077170418006431,
"grad_norm": 4.2946319580078125,
"learning_rate": 2.6416666666666666e-06,
"loss": 0.0219,
"num_input_tokens_seen": 4274112,
"step": 317
},
{
"epoch": 4.090032154340836,
"grad_norm": 1.3583834171295166,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0044,
"num_input_tokens_seen": 4287904,
"step": 318
},
{
"epoch": 4.102893890675241,
"grad_norm": 5.484881401062012,
"learning_rate": 2.6583333333333334e-06,
"loss": 0.0335,
"num_input_tokens_seen": 4301568,
"step": 319
},
{
"epoch": 4.115755627009646,
"grad_norm": 3.5873327255249023,
"learning_rate": 2.666666666666667e-06,
"loss": 0.0053,
"num_input_tokens_seen": 4314848,
"step": 320
},
{
"epoch": 4.128617363344051,
"grad_norm": 2.954930067062378,
"learning_rate": 2.6750000000000002e-06,
"loss": 0.0196,
"num_input_tokens_seen": 4327456,
"step": 321
},
{
"epoch": 4.141479099678457,
"grad_norm": 2.628854274749756,
"learning_rate": 2.683333333333333e-06,
"loss": 0.0309,
"num_input_tokens_seen": 4341024,
"step": 322
},
{
"epoch": 4.154340836012862,
"grad_norm": 6.691638469696045,
"learning_rate": 2.691666666666667e-06,
"loss": 0.0382,
"num_input_tokens_seen": 4353824,
"step": 323
},
{
"epoch": 4.167202572347267,
"grad_norm": 6.866171360015869,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.046,
"num_input_tokens_seen": 4367616,
"step": 324
},
{
"epoch": 4.180064308681672,
"grad_norm": 4.016347408294678,
"learning_rate": 2.7083333333333334e-06,
"loss": 0.0133,
"num_input_tokens_seen": 4380448,
"step": 325
},
{
"epoch": 4.192926045016077,
"grad_norm": 4.948652744293213,
"learning_rate": 2.7166666666666668e-06,
"loss": 0.0265,
"num_input_tokens_seen": 4393920,
"step": 326
},
{
"epoch": 4.205787781350482,
"grad_norm": 2.7615582942962646,
"learning_rate": 2.7250000000000006e-06,
"loss": 0.0084,
"num_input_tokens_seen": 4407264,
"step": 327
},
{
"epoch": 4.218649517684887,
"grad_norm": 3.0052027702331543,
"learning_rate": 2.7333333333333336e-06,
"loss": 0.0382,
"num_input_tokens_seen": 4420352,
"step": 328
},
{
"epoch": 4.231511254019293,
"grad_norm": 3.0679566860198975,
"learning_rate": 2.741666666666667e-06,
"loss": 0.0101,
"num_input_tokens_seen": 4434688,
"step": 329
},
{
"epoch": 4.244372990353698,
"grad_norm": 4.612461566925049,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0174,
"num_input_tokens_seen": 4448160,
"step": 330
},
{
"epoch": 4.257234726688103,
"grad_norm": 4.299385070800781,
"learning_rate": 2.7583333333333333e-06,
"loss": 0.023,
"num_input_tokens_seen": 4461184,
"step": 331
},
{
"epoch": 4.270096463022508,
"grad_norm": 1.5578008890151978,
"learning_rate": 2.766666666666667e-06,
"loss": 0.0162,
"num_input_tokens_seen": 4475168,
"step": 332
},
{
"epoch": 4.282958199356913,
"grad_norm": 3.8595075607299805,
"learning_rate": 2.7750000000000005e-06,
"loss": 0.0261,
"num_input_tokens_seen": 4487872,
"step": 333
},
{
"epoch": 4.295819935691318,
"grad_norm": 8.00913143157959,
"learning_rate": 2.7833333333333335e-06,
"loss": 0.0266,
"num_input_tokens_seen": 4501216,
"step": 334
},
{
"epoch": 4.308681672025724,
"grad_norm": 2.2975409030914307,
"learning_rate": 2.791666666666667e-06,
"loss": 0.0194,
"num_input_tokens_seen": 4514336,
"step": 335
},
{
"epoch": 4.321543408360129,
"grad_norm": 1.9800870418548584,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0058,
"num_input_tokens_seen": 4528288,
"step": 336
},
{
"epoch": 4.334405144694534,
"grad_norm": 6.778807163238525,
"learning_rate": 2.8083333333333333e-06,
"loss": 0.0065,
"num_input_tokens_seen": 4541728,
"step": 337
},
{
"epoch": 4.347266881028939,
"grad_norm": 7.885656833648682,
"learning_rate": 2.816666666666667e-06,
"loss": 0.0202,
"num_input_tokens_seen": 4555552,
"step": 338
},
{
"epoch": 4.360128617363344,
"grad_norm": 9.400197982788086,
"learning_rate": 2.825e-06,
"loss": 0.0135,
"num_input_tokens_seen": 4568768,
"step": 339
},
{
"epoch": 4.372990353697749,
"grad_norm": 5.712286472320557,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.01,
"num_input_tokens_seen": 4582400,
"step": 340
},
{
"epoch": 4.385852090032154,
"grad_norm": 4.751468181610107,
"learning_rate": 2.841666666666667e-06,
"loss": 0.0051,
"num_input_tokens_seen": 4595360,
"step": 341
},
{
"epoch": 4.39871382636656,
"grad_norm": 2.9044384956359863,
"learning_rate": 2.85e-06,
"loss": 0.0293,
"num_input_tokens_seen": 4609632,
"step": 342
},
{
"epoch": 4.411575562700965,
"grad_norm": 5.8322224617004395,
"learning_rate": 2.8583333333333336e-06,
"loss": 0.046,
"num_input_tokens_seen": 4623232,
"step": 343
},
{
"epoch": 4.42443729903537,
"grad_norm": 0.7529265284538269,
"learning_rate": 2.866666666666667e-06,
"loss": 0.0024,
"num_input_tokens_seen": 4636864,
"step": 344
},
{
"epoch": 4.437299035369775,
"grad_norm": 4.490261554718018,
"learning_rate": 2.875e-06,
"loss": 0.0211,
"num_input_tokens_seen": 4649600,
"step": 345
},
{
"epoch": 4.45016077170418,
"grad_norm": 9.859882354736328,
"learning_rate": 2.8833333333333334e-06,
"loss": 0.0229,
"num_input_tokens_seen": 4663072,
"step": 346
},
{
"epoch": 4.463022508038585,
"grad_norm": 3.8769121170043945,
"learning_rate": 2.8916666666666672e-06,
"loss": 0.0103,
"num_input_tokens_seen": 4676960,
"step": 347
},
{
"epoch": 4.47588424437299,
"grad_norm": 6.608347415924072,
"learning_rate": 2.9e-06,
"loss": 0.0262,
"num_input_tokens_seen": 4690080,
"step": 348
},
{
"epoch": 4.488745980707396,
"grad_norm": 3.058494806289673,
"learning_rate": 2.9083333333333336e-06,
"loss": 0.0295,
"num_input_tokens_seen": 4704064,
"step": 349
},
{
"epoch": 4.501607717041801,
"grad_norm": 4.893054485321045,
"learning_rate": 2.916666666666667e-06,
"loss": 0.0149,
"num_input_tokens_seen": 4717536,
"step": 350
},
{
"epoch": 4.514469453376206,
"grad_norm": 5.85437536239624,
"learning_rate": 2.925e-06,
"loss": 0.0337,
"num_input_tokens_seen": 4731040,
"step": 351
},
{
"epoch": 4.527331189710611,
"grad_norm": 5.814053058624268,
"learning_rate": 2.9333333333333338e-06,
"loss": 0.0318,
"num_input_tokens_seen": 4744864,
"step": 352
},
{
"epoch": 4.540192926045016,
"grad_norm": 6.330677509307861,
"learning_rate": 2.941666666666667e-06,
"loss": 0.0213,
"num_input_tokens_seen": 4758240,
"step": 353
},
{
"epoch": 4.553054662379421,
"grad_norm": 0.9806466698646545,
"learning_rate": 2.95e-06,
"loss": 0.0048,
"num_input_tokens_seen": 4770368,
"step": 354
},
{
"epoch": 4.565916398713826,
"grad_norm": 3.046319007873535,
"learning_rate": 2.9583333333333335e-06,
"loss": 0.0326,
"num_input_tokens_seen": 4783328,
"step": 355
},
{
"epoch": 4.578778135048232,
"grad_norm": 5.004611015319824,
"learning_rate": 2.9666666666666673e-06,
"loss": 0.013,
"num_input_tokens_seen": 4797088,
"step": 356
},
{
"epoch": 4.591639871382637,
"grad_norm": 5.062610626220703,
"learning_rate": 2.9750000000000003e-06,
"loss": 0.0293,
"num_input_tokens_seen": 4810464,
"step": 357
},
{
"epoch": 4.604501607717042,
"grad_norm": 9.05587100982666,
"learning_rate": 2.9833333333333337e-06,
"loss": 0.0411,
"num_input_tokens_seen": 4823744,
"step": 358
},
{
"epoch": 4.617363344051447,
"grad_norm": 7.2998175621032715,
"learning_rate": 2.991666666666667e-06,
"loss": 0.0389,
"num_input_tokens_seen": 4837824,
"step": 359
},
{
"epoch": 4.630225080385852,
"grad_norm": 6.108879566192627,
"learning_rate": 3e-06,
"loss": 0.0395,
"num_input_tokens_seen": 4852096,
"step": 360
},
{
"epoch": 4.643086816720257,
"grad_norm": 4.677525043487549,
"learning_rate": 3.0083333333333335e-06,
"loss": 0.0065,
"num_input_tokens_seen": 4865600,
"step": 361
},
{
"epoch": 4.655948553054662,
"grad_norm": 9.179205894470215,
"learning_rate": 3.0166666666666673e-06,
"loss": 0.0294,
"num_input_tokens_seen": 4878592,
"step": 362
},
{
"epoch": 4.668810289389068,
"grad_norm": 3.478533983230591,
"learning_rate": 3.0250000000000003e-06,
"loss": 0.0192,
"num_input_tokens_seen": 4892064,
"step": 363
},
{
"epoch": 4.681672025723473,
"grad_norm": 6.015811443328857,
"learning_rate": 3.0333333333333337e-06,
"loss": 0.0179,
"num_input_tokens_seen": 4906304,
"step": 364
},
{
"epoch": 4.694533762057878,
"grad_norm": 3.1433732509613037,
"learning_rate": 3.0416666666666666e-06,
"loss": 0.0131,
"num_input_tokens_seen": 4920480,
"step": 365
},
{
"epoch": 4.707395498392283,
"grad_norm": 3.894927501678467,
"learning_rate": 3.05e-06,
"loss": 0.0216,
"num_input_tokens_seen": 4933376,
"step": 366
},
{
"epoch": 4.720257234726688,
"grad_norm": 3.4421629905700684,
"learning_rate": 3.058333333333334e-06,
"loss": 0.0171,
"num_input_tokens_seen": 4947872,
"step": 367
},
{
"epoch": 4.733118971061093,
"grad_norm": 2.935985803604126,
"learning_rate": 3.066666666666667e-06,
"loss": 0.0129,
"num_input_tokens_seen": 4961280,
"step": 368
},
{
"epoch": 4.745980707395498,
"grad_norm": 7.003020763397217,
"learning_rate": 3.075e-06,
"loss": 0.0268,
"num_input_tokens_seen": 4975072,
"step": 369
},
{
"epoch": 4.758842443729904,
"grad_norm": 5.833902835845947,
"learning_rate": 3.0833333333333336e-06,
"loss": 0.0313,
"num_input_tokens_seen": 4988928,
"step": 370
},
{
"epoch": 4.771704180064309,
"grad_norm": 4.902740001678467,
"learning_rate": 3.0916666666666666e-06,
"loss": 0.0197,
"num_input_tokens_seen": 5002656,
"step": 371
},
{
"epoch": 4.784565916398714,
"grad_norm": 1.6855580806732178,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0051,
"num_input_tokens_seen": 5016928,
"step": 372
},
{
"epoch": 4.797427652733119,
"grad_norm": 3.116506814956665,
"learning_rate": 3.1083333333333338e-06,
"loss": 0.0107,
"num_input_tokens_seen": 5030432,
"step": 373
},
{
"epoch": 4.810289389067524,
"grad_norm": 2.606302261352539,
"learning_rate": 3.1166666666666668e-06,
"loss": 0.0299,
"num_input_tokens_seen": 5044128,
"step": 374
},
{
"epoch": 4.823151125401929,
"grad_norm": 5.198400497436523,
"learning_rate": 3.125e-06,
"loss": 0.0549,
"num_input_tokens_seen": 5058592,
"step": 375
},
{
"epoch": 4.836012861736334,
"grad_norm": 3.7756950855255127,
"learning_rate": 3.133333333333334e-06,
"loss": 0.0163,
"num_input_tokens_seen": 5071680,
"step": 376
},
{
"epoch": 4.84887459807074,
"grad_norm": 3.266502618789673,
"learning_rate": 3.141666666666667e-06,
"loss": 0.0133,
"num_input_tokens_seen": 5085760,
"step": 377
},
{
"epoch": 4.861736334405145,
"grad_norm": 3.7333478927612305,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0338,
"num_input_tokens_seen": 5099168,
"step": 378
},
{
"epoch": 4.87459807073955,
"grad_norm": 6.106029033660889,
"learning_rate": 3.1583333333333337e-06,
"loss": 0.0219,
"num_input_tokens_seen": 5112512,
"step": 379
},
{
"epoch": 4.887459807073955,
"grad_norm": 3.018552541732788,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.0113,
"num_input_tokens_seen": 5125696,
"step": 380
},
{
"epoch": 4.90032154340836,
"grad_norm": 5.034224987030029,
"learning_rate": 3.175e-06,
"loss": 0.0297,
"num_input_tokens_seen": 5138816,
"step": 381
},
{
"epoch": 4.913183279742765,
"grad_norm": 6.260331630706787,
"learning_rate": 3.183333333333334e-06,
"loss": 0.0417,
"num_input_tokens_seen": 5152320,
"step": 382
},
{
"epoch": 4.92604501607717,
"grad_norm": 4.149762153625488,
"learning_rate": 3.191666666666667e-06,
"loss": 0.027,
"num_input_tokens_seen": 5165696,
"step": 383
},
{
"epoch": 4.938906752411576,
"grad_norm": 2.9361073970794678,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0271,
"num_input_tokens_seen": 5178368,
"step": 384
},
{
"epoch": 4.951768488745981,
"grad_norm": 5.155781269073486,
"learning_rate": 3.2083333333333337e-06,
"loss": 0.0207,
"num_input_tokens_seen": 5192736,
"step": 385
},
{
"epoch": 4.951768488745981,
"num_input_tokens_seen": 5192736,
"step": 385,
"total_flos": 2.3382655808988774e+17,
"train_loss": 0.7082552919200585,
"train_runtime": 5092.4527,
"train_samples_per_second": 19.519,
"train_steps_per_second": 0.076
}
],
"logging_steps": 1,
"max_steps": 385,
"num_input_tokens_seen": 5192736,
"num_train_epochs": 5,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.3382655808988774e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}