llmTechChat-lora/checkpoint-640/trainer_state.json
{
"best_metric": 1.8478573560714722,
"best_model_checkpoint": "./llmTechChat-lora/checkpoint-320",
"epoch": 3.9485580670303975,
"eval_steps": 40,
"global_step": 640,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2.9999999999999997e-05,
"loss": 4.3577,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 4.326064109802246,
"eval_runtime": 288.7431,
"eval_samples_per_second": 206.796,
"eval_steps_per_second": 206.796,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 5.9999999999999995e-05,
"loss": 4.2951,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 8.999999999999999e-05,
"loss": 3.9156,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 0.00011999999999999999,
"loss": 3.4836,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 0.00015,
"loss": 3.1743,
"step": 5
},
{
"epoch": 0.04,
"learning_rate": 0.00017999999999999998,
"loss": 2.8242,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 0.00020999999999999998,
"loss": 2.7478,
"step": 7
},
{
"epoch": 0.05,
"learning_rate": 0.00023999999999999998,
"loss": 2.7198,
"step": 8
},
{
"epoch": 0.06,
"learning_rate": 0.00027,
"loss": 2.6025,
"step": 9
},
{
"epoch": 0.06,
"learning_rate": 0.0003,
"loss": 2.5337,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 0.00029999813499925374,
"loss": 2.5019,
"step": 11
},
{
"epoch": 0.07,
"learning_rate": 0.0002999925400433914,
"loss": 2.3393,
"step": 12
},
{
"epoch": 0.08,
"learning_rate": 0.00029998321527154097,
"loss": 2.3796,
"step": 13
},
{
"epoch": 0.09,
"learning_rate": 0.0002999701609155785,
"loss": 2.3054,
"step": 14
},
{
"epoch": 0.09,
"learning_rate": 0.0002999533773001224,
"loss": 2.2898,
"step": 15
},
{
"epoch": 0.1,
"learning_rate": 0.00029993286484252544,
"loss": 2.2254,
"step": 16
},
{
"epoch": 0.11,
"learning_rate": 0.00029990862405286433,
"loss": 2.2197,
"step": 17
},
{
"epoch": 0.11,
"learning_rate": 0.0002998806555339269,
"loss": 2.1797,
"step": 18
},
{
"epoch": 0.12,
"learning_rate": 0.0002998489599811972,
"loss": 2.1854,
"step": 19
},
{
"epoch": 0.12,
"learning_rate": 0.0002998135381828383,
"loss": 2.2105,
"step": 20
},
{
"epoch": 0.13,
"learning_rate": 0.00029977439101967274,
"loss": 2.1866,
"step": 21
},
{
"epoch": 0.14,
"learning_rate": 0.00029973151946516025,
"loss": 2.1718,
"step": 22
},
{
"epoch": 0.14,
"learning_rate": 0.0002996849245853739,
"loss": 2.1158,
"step": 23
},
{
"epoch": 0.15,
"learning_rate": 0.0002996346075389736,
"loss": 2.1495,
"step": 24
},
{
"epoch": 0.16,
"learning_rate": 0.00029958056957717696,
"loss": 2.1326,
"step": 25
},
{
"epoch": 0.16,
"learning_rate": 0.00029952281204372863,
"loss": 2.1391,
"step": 26
},
{
"epoch": 0.17,
"learning_rate": 0.0002994613363748664,
"loss": 2.1039,
"step": 27
},
{
"epoch": 0.17,
"learning_rate": 0.00029939614409928584,
"loss": 2.132,
"step": 28
},
{
"epoch": 0.18,
"learning_rate": 0.00029932723683810225,
"loss": 2.1278,
"step": 29
},
{
"epoch": 0.19,
"learning_rate": 0.0002992546163048102,
"loss": 2.0698,
"step": 30
},
{
"epoch": 0.19,
"learning_rate": 0.00029917828430524096,
"loss": 2.0757,
"step": 31
},
{
"epoch": 0.2,
"learning_rate": 0.0002990982427375177,
"loss": 2.0689,
"step": 32
},
{
"epoch": 0.21,
"learning_rate": 0.0002990144935920083,
"loss": 2.0986,
"step": 33
},
{
"epoch": 0.21,
"learning_rate": 0.0002989270389512756,
"loss": 2.058,
"step": 34
},
{
"epoch": 0.22,
"learning_rate": 0.0002988358809900258,
"loss": 2.0451,
"step": 35
},
{
"epoch": 0.22,
"learning_rate": 0.00029874102197505447,
"loss": 2.0613,
"step": 36
},
{
"epoch": 0.23,
"learning_rate": 0.0002986424642651902,
"loss": 2.0796,
"step": 37
},
{
"epoch": 0.24,
"learning_rate": 0.0002985402103112355,
"loss": 2.086,
"step": 38
},
{
"epoch": 0.24,
"learning_rate": 0.00029843426265590656,
"loss": 2.0275,
"step": 39
},
{
"epoch": 0.25,
"learning_rate": 0.0002983246239337692,
"loss": 2.0615,
"step": 40
},
{
"epoch": 0.25,
"eval_loss": 2.0476396083831787,
"eval_runtime": 289.9408,
"eval_samples_per_second": 205.942,
"eval_steps_per_second": 205.942,
"step": 40
},
{
"epoch": 0.26,
"learning_rate": 0.0002982112968711744,
"loss": 2.1012,
"step": 41
},
{
"epoch": 0.26,
"learning_rate": 0.0002980942842861893,
"loss": 2.0537,
"step": 42
},
{
"epoch": 0.27,
"learning_rate": 0.00029797358908852816,
"loss": 2.0595,
"step": 43
},
{
"epoch": 0.27,
"learning_rate": 0.00029784921427947946,
"loss": 2.0409,
"step": 44
},
{
"epoch": 0.28,
"learning_rate": 0.0002977211629518312,
"loss": 2.0045,
"step": 45
},
{
"epoch": 0.29,
"learning_rate": 0.00029758943828979444,
"loss": 2.0176,
"step": 46
},
{
"epoch": 0.29,
"learning_rate": 0.0002974540435689237,
"loss": 2.0189,
"step": 47
},
{
"epoch": 0.3,
"learning_rate": 0.0002973149821560358,
"loss": 2.0169,
"step": 48
},
{
"epoch": 0.31,
"learning_rate": 0.00029717225750912585,
"loss": 2.0553,
"step": 49
},
{
"epoch": 0.31,
"learning_rate": 0.00029702587317728153,
"loss": 2.0569,
"step": 50
},
{
"epoch": 0.32,
"learning_rate": 0.0002968758328005947,
"loss": 2.0522,
"step": 51
},
{
"epoch": 0.32,
"learning_rate": 0.0002967221401100708,
"loss": 2.0285,
"step": 52
},
{
"epoch": 0.33,
"learning_rate": 0.00029656479892753635,
"loss": 2.0266,
"step": 53
},
{
"epoch": 0.34,
"learning_rate": 0.0002964038131655436,
"loss": 2.0161,
"step": 54
},
{
"epoch": 0.34,
"learning_rate": 0.0002962391868272735,
"loss": 2.0122,
"step": 55
},
{
"epoch": 0.35,
"learning_rate": 0.00029607092400643593,
"loss": 1.9926,
"step": 56
},
{
"epoch": 0.36,
"learning_rate": 0.000295899028887168,
"loss": 2.0123,
"step": 57
},
{
"epoch": 0.36,
"learning_rate": 0.0002957235057439301,
"loss": 2.0121,
"step": 58
},
{
"epoch": 0.37,
"learning_rate": 0.0002955443589413994,
"loss": 2.0245,
"step": 59
},
{
"epoch": 0.37,
"learning_rate": 0.00029536159293436166,
"loss": 2.0127,
"step": 60
},
{
"epoch": 0.38,
"learning_rate": 0.0002951752122676,
"loss": 2.0057,
"step": 61
},
{
"epoch": 0.39,
"learning_rate": 0.000294985221575782,
"loss": 2.0226,
"step": 62
},
{
"epoch": 0.39,
"learning_rate": 0.0002947916255833451,
"loss": 2.0032,
"step": 63
},
{
"epoch": 0.4,
"learning_rate": 0.00029459442910437797,
"loss": 2.045,
"step": 64
},
{
"epoch": 0.41,
"learning_rate": 0.00029439363704250176,
"loss": 1.9794,
"step": 65
},
{
"epoch": 0.41,
"learning_rate": 0.0002941892543907478,
"loss": 2.0009,
"step": 66
},
{
"epoch": 0.42,
"learning_rate": 0.0002939812862314333,
"loss": 1.9508,
"step": 67
},
{
"epoch": 0.42,
"learning_rate": 0.00029376973773603533,
"loss": 1.9913,
"step": 68
},
{
"epoch": 0.43,
"learning_rate": 0.0002935546141650618,
"loss": 1.9762,
"step": 69
},
{
"epoch": 0.44,
"learning_rate": 0.00029333592086792107,
"loss": 2.0,
"step": 70
},
{
"epoch": 0.44,
"learning_rate": 0.0002931136632827886,
"loss": 1.9629,
"step": 71
},
{
"epoch": 0.45,
"learning_rate": 0.0002928878469364719,
"loss": 2.0009,
"step": 72
},
{
"epoch": 0.46,
"learning_rate": 0.00029265847744427303,
"loss": 1.9714,
"step": 73
},
{
"epoch": 0.46,
"learning_rate": 0.0002924255605098489,
"loss": 1.9474,
"step": 74
},
{
"epoch": 0.47,
"learning_rate": 0.0002921891019250697,
"loss": 1.9959,
"step": 75
},
{
"epoch": 0.47,
"learning_rate": 0.0002919491075698746,
"loss": 1.9846,
"step": 76
},
{
"epoch": 0.48,
"learning_rate": 0.00029170558341212554,
"loss": 1.9978,
"step": 77
},
{
"epoch": 0.49,
"learning_rate": 0.00029145853550745904,
"loss": 1.9527,
"step": 78
},
{
"epoch": 0.49,
"learning_rate": 0.00029120796999913546,
"loss": 1.9585,
"step": 79
},
{
"epoch": 0.5,
"learning_rate": 0.0002909538931178862,
"loss": 1.9905,
"step": 80
},
{
"epoch": 0.5,
"eval_loss": 1.96906578540802,
"eval_runtime": 291.8459,
"eval_samples_per_second": 204.598,
"eval_steps_per_second": 204.598,
"step": 80
},
{
"epoch": 0.51,
"learning_rate": 0.00029069631118175903,
"loss": 1.9926,
"step": 81
},
{
"epoch": 0.51,
"learning_rate": 0.00029043523059596053,
"loss": 1.9916,
"step": 82
},
{
"epoch": 0.52,
"learning_rate": 0.0002901706578526973,
"loss": 1.9545,
"step": 83
},
{
"epoch": 0.52,
"learning_rate": 0.0002899025995310141,
"loss": 1.9399,
"step": 84
},
{
"epoch": 0.53,
"learning_rate": 0.00028963106229663063,
"loss": 1.9515,
"step": 85
},
{
"epoch": 0.54,
"learning_rate": 0.00028935605290177535,
"loss": 1.9855,
"step": 86
},
{
"epoch": 0.54,
"learning_rate": 0.0002890775781850181,
"loss": 2.0159,
"step": 87
},
{
"epoch": 0.55,
"learning_rate": 0.00028879564507109946,
"loss": 1.9885,
"step": 88
},
{
"epoch": 0.55,
"learning_rate": 0.00028851026057075916,
"loss": 1.9625,
"step": 89
},
{
"epoch": 0.56,
"learning_rate": 0.00028822143178056114,
"loss": 1.9161,
"step": 90
},
{
"epoch": 0.57,
"learning_rate": 0.0002879291658827176,
"loss": 1.9141,
"step": 91
},
{
"epoch": 0.57,
"learning_rate": 0.00028763347014491,
"loss": 1.9867,
"step": 92
},
{
"epoch": 0.58,
"learning_rate": 0.00028733435192010887,
"loss": 1.9325,
"step": 93
},
{
"epoch": 0.59,
"learning_rate": 0.0002870318186463901,
"loss": 1.9517,
"step": 94
},
{
"epoch": 0.59,
"learning_rate": 0.00028672587784675096,
"loss": 1.9435,
"step": 95
},
{
"epoch": 0.6,
"learning_rate": 0.0002864165371289223,
"loss": 1.9428,
"step": 96
},
{
"epoch": 0.6,
"learning_rate": 0.0002861038041851797,
"loss": 1.9182,
"step": 97
},
{
"epoch": 0.61,
"learning_rate": 0.0002857876867921522,
"loss": 1.9344,
"step": 98
},
{
"epoch": 0.62,
"learning_rate": 0.0002854681928106287,
"loss": 1.9652,
"step": 99
},
{
"epoch": 0.62,
"learning_rate": 0.0002851453301853628,
"loss": 1.9332,
"step": 100
},
{
"epoch": 0.63,
"learning_rate": 0.000284819106944875,
"loss": 1.9042,
"step": 101
},
{
"epoch": 0.64,
"learning_rate": 0.0002844895312012531,
"loss": 1.9571,
"step": 102
},
{
"epoch": 0.64,
"learning_rate": 0.0002841566111499505,
"loss": 1.9129,
"step": 103
},
{
"epoch": 0.65,
"learning_rate": 0.0002838203550695825,
"loss": 1.9347,
"step": 104
},
{
"epoch": 0.65,
"learning_rate": 0.00028348077132172027,
"loss": 1.9461,
"step": 105
},
{
"epoch": 0.66,
"learning_rate": 0.0002831378683506831,
"loss": 1.9188,
"step": 106
},
{
"epoch": 0.67,
"learning_rate": 0.00028279165468332823,
"loss": 1.9491,
"step": 107
},
{
"epoch": 0.67,
"learning_rate": 0.000282442138928839,
"loss": 1.961,
"step": 108
},
{
"epoch": 0.68,
"learning_rate": 0.00028208932977851067,
"loss": 1.9048,
"step": 109
},
{
"epoch": 0.69,
"learning_rate": 0.0002817332360055343,
"loss": 1.9493,
"step": 110
},
{
"epoch": 0.69,
"learning_rate": 0.0002813738664647784,
"loss": 1.9685,
"step": 111
},
{
"epoch": 0.7,
"learning_rate": 0.00028101123009256947,
"loss": 1.9054,
"step": 112
},
{
"epoch": 0.7,
"learning_rate": 0.0002806453359064686,
"loss": 1.9317,
"step": 113
},
{
"epoch": 0.71,
"learning_rate": 0.00028027619300504834,
"loss": 1.9701,
"step": 114
},
{
"epoch": 0.72,
"learning_rate": 0.0002799038105676658,
"loss": 1.9426,
"step": 115
},
{
"epoch": 0.72,
"learning_rate": 0.0002795281978542346,
"loss": 1.957,
"step": 116
},
{
"epoch": 0.73,
"learning_rate": 0.0002791493642049947,
"loss": 1.9535,
"step": 117
},
{
"epoch": 0.74,
"learning_rate": 0.0002787673190402799,
"loss": 1.9045,
"step": 118
},
{
"epoch": 0.74,
"learning_rate": 0.00027838207186028376,
"loss": 1.9575,
"step": 119
},
{
"epoch": 0.75,
"learning_rate": 0.0002779936322448233,
"loss": 1.8699,
"step": 120
},
{
"epoch": 0.75,
"eval_loss": 1.9343771934509277,
"eval_runtime": 292.8194,
"eval_samples_per_second": 203.917,
"eval_steps_per_second": 203.917,
"step": 120
},
{
"epoch": 0.75,
"learning_rate": 0.0002776020098531009,
"loss": 1.956,
"step": 121
},
{
"epoch": 0.76,
"learning_rate": 0.00027720721442346387,
"loss": 1.8958,
"step": 122
},
{
"epoch": 0.77,
"learning_rate": 0.0002768092557731625,
"loss": 1.9157,
"step": 123
},
{
"epoch": 0.77,
"learning_rate": 0.00027640814379810587,
"loss": 1.9118,
"step": 124
},
{
"epoch": 0.78,
"learning_rate": 0.0002760038884726157,
"loss": 1.9707,
"step": 125
},
{
"epoch": 0.79,
"learning_rate": 0.0002755964998491785,
"loss": 1.9563,
"step": 126
},
{
"epoch": 0.79,
"learning_rate": 0.0002751859880581954,
"loss": 1.9825,
"step": 127
},
{
"epoch": 0.8,
"learning_rate": 0.0002747723633077303,
"loss": 1.9687,
"step": 128
},
{
"epoch": 0.8,
"learning_rate": 0.0002743556358832562,
"loss": 1.9378,
"step": 129
},
{
"epoch": 0.81,
"learning_rate": 0.00027393581614739923,
"loss": 1.9307,
"step": 130
},
{
"epoch": 0.82,
"learning_rate": 0.00027351291453968086,
"loss": 1.9333,
"step": 131
},
{
"epoch": 0.82,
"learning_rate": 0.0002730869415762587,
"loss": 1.9229,
"step": 132
},
{
"epoch": 0.83,
"learning_rate": 0.0002726579078496647,
"loss": 1.911,
"step": 133
},
{
"epoch": 0.84,
"learning_rate": 0.00027222582402854176,
"loss": 1.9556,
"step": 134
},
{
"epoch": 0.84,
"learning_rate": 0.0002717907008573785,
"loss": 1.9008,
"step": 135
},
{
"epoch": 0.85,
"learning_rate": 0.0002713525491562421,
"loss": 1.9651,
"step": 136
},
{
"epoch": 0.85,
"learning_rate": 0.0002709113798205093,
"loss": 1.9337,
"step": 137
},
{
"epoch": 0.86,
"learning_rate": 0.00027046720382059526,
"loss": 1.9485,
"step": 138
},
{
"epoch": 0.87,
"learning_rate": 0.00027002003220168093,
"loss": 1.8647,
"step": 139
},
{
"epoch": 0.87,
"learning_rate": 0.0002695698760834384,
"loss": 1.9288,
"step": 140
},
{
"epoch": 0.88,
"learning_rate": 0.00026911674665975417,
"loss": 1.9535,
"step": 141
},
{
"epoch": 0.89,
"learning_rate": 0.0002686606551984512,
"loss": 1.932,
"step": 142
},
{
"epoch": 0.89,
"learning_rate": 0.00026820161304100823,
"loss": 1.9516,
"step": 143
},
{
"epoch": 0.9,
"learning_rate": 0.0002677396316022783,
"loss": 1.9347,
"step": 144
},
{
"epoch": 0.9,
"learning_rate": 0.00026727472237020447,
"loss": 1.9473,
"step": 145
},
{
"epoch": 0.91,
"learning_rate": 0.0002668068969055341,
"loss": 1.9428,
"step": 146
},
{
"epoch": 0.92,
"learning_rate": 0.0002663361668415318,
"loss": 1.9204,
"step": 147
},
{
"epoch": 0.92,
"learning_rate": 0.0002658625438836899,
"loss": 1.9039,
"step": 148
},
{
"epoch": 0.93,
"learning_rate": 0.0002653860398094373,
"loss": 1.9166,
"step": 149
},
{
"epoch": 0.94,
"learning_rate": 0.00026490666646784665,
"loss": 1.9072,
"step": 150
},
{
"epoch": 0.94,
"learning_rate": 0.00026442443577933994,
"loss": 1.9014,
"step": 151
},
{
"epoch": 0.95,
"learning_rate": 0.0002639393597353917,
"loss": 1.9272,
"step": 152
},
{
"epoch": 0.95,
"learning_rate": 0.00026345145039823097,
"loss": 1.9274,
"step": 153
},
{
"epoch": 0.96,
"learning_rate": 0.00026296071990054165,
"loss": 1.9548,
"step": 154
},
{
"epoch": 0.97,
"learning_rate": 0.0002624671804451601,
"loss": 1.928,
"step": 155
},
{
"epoch": 0.97,
"learning_rate": 0.0002619708443047725,
"loss": 1.9072,
"step": 156
},
{
"epoch": 0.98,
"learning_rate": 0.00026147172382160914,
"loss": 1.9116,
"step": 157
},
{
"epoch": 0.99,
"learning_rate": 0.0002609698314071376,
"loss": 1.915,
"step": 158
},
{
"epoch": 0.99,
"learning_rate": 0.0002604651795417543,
"loss": 1.915,
"step": 159
},
{
"epoch": 1.0,
"learning_rate": 0.0002599577807744739,
"loss": 1.9604,
"step": 160
},
{
"epoch": 1.0,
"eval_loss": 1.911091685295105,
"eval_runtime": 289.5613,
"eval_samples_per_second": 206.212,
"eval_steps_per_second": 206.212,
"step": 160
},
{
"epoch": 1.0,
"learning_rate": 0.0002594476477226176,
"loss": 1.9335,
"step": 161
},
{
"epoch": 1.01,
"learning_rate": 0.00025893479307149893,
"loss": 1.9033,
"step": 162
},
{
"epoch": 1.0,
"learning_rate": 0.0002584192295741087,
"loss": 1.874,
"step": 163
},
{
"epoch": 1.01,
"learning_rate": 0.00025790097005079764,
"loss": 1.7896,
"step": 164
},
{
"epoch": 1.01,
"learning_rate": 0.0002573800273889577,
"loss": 1.7936,
"step": 165
},
{
"epoch": 1.02,
"learning_rate": 0.00025685641454270173,
"loss": 1.8321,
"step": 166
},
{
"epoch": 1.03,
"learning_rate": 0.00025633014453254086,
"loss": 1.8105,
"step": 167
},
{
"epoch": 1.03,
"learning_rate": 0.0002558012304450613,
"loss": 1.8068,
"step": 168
},
{
"epoch": 1.04,
"learning_rate": 0.0002552696854325987,
"loss": 1.7953,
"step": 169
},
{
"epoch": 1.05,
"learning_rate": 0.0002547355227129109,
"loss": 1.8073,
"step": 170
},
{
"epoch": 1.05,
"learning_rate": 0.0002541987555688496,
"loss": 1.8095,
"step": 171
},
{
"epoch": 1.06,
"learning_rate": 0.0002536593973480297,
"loss": 1.7939,
"step": 172
},
{
"epoch": 1.06,
"learning_rate": 0.0002531174614624977,
"loss": 1.8226,
"step": 173
},
{
"epoch": 1.07,
"learning_rate": 0.000252572961388398,
"loss": 1.7815,
"step": 174
},
{
"epoch": 1.08,
"learning_rate": 0.00025202591066563786,
"loss": 1.7929,
"step": 175
},
{
"epoch": 1.08,
"learning_rate": 0.00025147632289755075,
"loss": 1.8027,
"step": 176
},
{
"epoch": 1.09,
"learning_rate": 0.0002509242117505579,
"loss": 1.8349,
"step": 177
},
{
"epoch": 1.1,
"learning_rate": 0.0002503695909538287,
"loss": 1.7828,
"step": 178
},
{
"epoch": 1.1,
"learning_rate": 0.0002498124742989391,
"loss": 1.814,
"step": 179
},
{
"epoch": 1.11,
"learning_rate": 0.0002492528756395289,
"loss": 1.821,
"step": 180
},
{
"epoch": 1.11,
"learning_rate": 0.0002486908088909569,
"loss": 1.8023,
"step": 181
},
{
"epoch": 1.12,
"learning_rate": 0.0002481262880299552,
"loss": 1.7953,
"step": 182
},
{
"epoch": 1.13,
"learning_rate": 0.0002475593270942814,
"loss": 1.8497,
"step": 183
},
{
"epoch": 1.13,
"learning_rate": 0.0002469899401823699,
"loss": 1.7957,
"step": 184
},
{
"epoch": 1.14,
"learning_rate": 0.0002464181414529809,
"loss": 1.7958,
"step": 185
},
{
"epoch": 1.15,
"learning_rate": 0.0002458439451248484,
"loss": 1.8024,
"step": 186
},
{
"epoch": 1.15,
"learning_rate": 0.0002452673654763268,
"loss": 1.8306,
"step": 187
},
{
"epoch": 1.16,
"learning_rate": 0.0002446884168450358,
"loss": 1.8174,
"step": 188
},
{
"epoch": 1.16,
"learning_rate": 0.00024410711362750386,
"loss": 1.8032,
"step": 189
},
{
"epoch": 1.17,
"learning_rate": 0.00024352347027881003,
"loss": 1.8087,
"step": 190
},
{
"epoch": 1.18,
"learning_rate": 0.0002429375013122247,
"loss": 1.7648,
"step": 191
},
{
"epoch": 1.18,
"learning_rate": 0.0002423492212988487,
"loss": 1.8544,
"step": 192
},
{
"epoch": 1.19,
"learning_rate": 0.00024175864486725092,
"loss": 1.7978,
"step": 193
},
{
"epoch": 1.2,
"learning_rate": 0.0002411657867031045,
"loss": 1.7866,
"step": 194
},
{
"epoch": 1.2,
"learning_rate": 0.0002405706615488216,
"loss": 1.8074,
"step": 195
},
{
"epoch": 1.21,
"learning_rate": 0.00023997328420318704,
"loss": 1.7842,
"step": 196
},
{
"epoch": 1.21,
"learning_rate": 0.00023937366952099005,
"loss": 1.781,
"step": 197
},
{
"epoch": 1.22,
"learning_rate": 0.00023877183241265514,
"loss": 1.786,
"step": 198
},
{
"epoch": 1.23,
"learning_rate": 0.00023816778784387094,
"loss": 1.8091,
"step": 199
},
{
"epoch": 1.23,
"learning_rate": 0.00023756155083521846,
"loss": 1.7684,
"step": 200
},
{
"epoch": 1.23,
"eval_loss": 1.897764801979065,
"eval_runtime": 292.524,
"eval_samples_per_second": 204.123,
"eval_steps_per_second": 204.123,
"step": 200
},
{
"epoch": 1.24,
"learning_rate": 0.00023695313646179735,
"loss": 1.7693,
"step": 201
},
{
"epoch": 1.25,
"learning_rate": 0.00023634255985285102,
"loss": 1.7598,
"step": 202
},
{
"epoch": 1.25,
"learning_rate": 0.00023572983619139058,
"loss": 1.8441,
"step": 203
},
{
"epoch": 1.26,
"learning_rate": 0.00023511498071381726,
"loss": 1.7768,
"step": 204
},
{
"epoch": 1.26,
"learning_rate": 0.00023449800870954326,
"loss": 1.784,
"step": 205
},
{
"epoch": 1.27,
"learning_rate": 0.00023387893552061199,
"loss": 1.7905,
"step": 206
},
{
"epoch": 1.28,
"learning_rate": 0.00023325777654131623,
"loss": 1.7996,
"step": 207
},
{
"epoch": 1.28,
"learning_rate": 0.00023263454721781537,
"loss": 1.7546,
"step": 208
},
{
"epoch": 1.29,
"learning_rate": 0.0002320092630477515,
"loss": 1.8043,
"step": 209
},
{
"epoch": 1.3,
"learning_rate": 0.0002313819395798639,
"loss": 1.7817,
"step": 210
},
{
"epoch": 1.3,
"learning_rate": 0.00023075259241360233,
"loss": 1.8117,
"step": 211
},
{
"epoch": 1.31,
"learning_rate": 0.00023012123719873926,
"loss": 1.7798,
"step": 212
},
{
"epoch": 1.31,
"learning_rate": 0.0002294878896349807,
"loss": 1.763,
"step": 213
},
{
"epoch": 1.32,
"learning_rate": 0.00022885256547157566,
"loss": 1.8169,
"step": 214
},
{
"epoch": 1.33,
"learning_rate": 0.0002282152805069247,
"loss": 1.797,
"step": 215
},
{
"epoch": 1.33,
"learning_rate": 0.00022757605058818688,
"loss": 1.7768,
"step": 216
},
{
"epoch": 1.34,
"learning_rate": 0.0002269348916108859,
"loss": 1.8005,
"step": 217
},
{
"epoch": 1.35,
"learning_rate": 0.00022629181951851473,
"loss": 1.7734,
"step": 218
},
{
"epoch": 1.35,
"learning_rate": 0.0002256468503021391,
"loss": 1.774,
"step": 219
},
{
"epoch": 1.36,
"learning_rate": 0.000225,
"loss": 1.7865,
"step": 220
},
{
"epoch": 1.36,
"learning_rate": 0.00022435128469711465,
"loss": 1.7613,
"step": 221
},
{
"epoch": 1.37,
"learning_rate": 0.00022370072052487668,
"loss": 1.8013,
"step": 222
},
{
"epoch": 1.38,
"learning_rate": 0.00022304832366065505,
"loss": 1.7617,
"step": 223
},
{
"epoch": 1.38,
"learning_rate": 0.00022239411032739162,
"loss": 1.7323,
"step": 224
},
{
"epoch": 1.39,
"learning_rate": 0.00022173809679319772,
"loss": 1.7838,
"step": 225
},
{
"epoch": 1.4,
"learning_rate": 0.0002210802993709498,
"loss": 1.8099,
"step": 226
},
{
"epoch": 1.4,
"learning_rate": 0.00022042073441788358,
"loss": 1.82,
"step": 227
},
{
"epoch": 1.41,
"learning_rate": 0.00021975941833518757,
"loss": 1.772,
"step": 228
},
{
"epoch": 1.41,
"learning_rate": 0.00021909636756759483,
"loss": 1.8044,
"step": 229
},
{
"epoch": 1.42,
"learning_rate": 0.00021843159860297442,
"loss": 1.7679,
"step": 230
},
{
"epoch": 1.43,
"learning_rate": 0.00021776512797192123,
"loss": 1.7786,
"step": 231
},
{
"epoch": 1.43,
"learning_rate": 0.00021709697224734487,
"loss": 1.8305,
"step": 232
},
{
"epoch": 1.44,
"learning_rate": 0.00021642714804405772,
"loss": 1.8044,
"step": 233
},
{
"epoch": 1.45,
"learning_rate": 0.0002157556720183616,
"loss": 1.7838,
"step": 234
},
{
"epoch": 1.45,
"learning_rate": 0.00021508256086763368,
"loss": 1.7639,
"step": 235
},
{
"epoch": 1.46,
"learning_rate": 0.00021440783132991136,
"loss": 1.8067,
"step": 236
},
{
"epoch": 1.46,
"learning_rate": 0.00021373150018347576,
"loss": 1.772,
"step": 237
},
{
"epoch": 1.47,
"learning_rate": 0.0002130535842464348,
"loss": 1.7985,
"step": 238
},
{
"epoch": 1.48,
"learning_rate": 0.00021237410037630493,
"loss": 1.7746,
"step": 239
},
{
"epoch": 1.48,
"learning_rate": 0.00021169306546959174,
"loss": 1.7673,
"step": 240
},
{
"epoch": 1.48,
"eval_loss": 1.880852222442627,
"eval_runtime": 291.0717,
"eval_samples_per_second": 205.142,
"eval_steps_per_second": 205.142,
"step": 240
},
{
"epoch": 1.49,
"learning_rate": 0.00021101049646137003,
"loss": 1.7555,
"step": 241
},
{
"epoch": 1.49,
"learning_rate": 0.0002103264103248626,
"loss": 1.7722,
"step": 242
},
{
"epoch": 1.5,
"learning_rate": 0.00020964082407101824,
"loss": 1.7686,
"step": 243
},
{
"epoch": 1.51,
"learning_rate": 0.00020895375474808852,
"loss": 1.7625,
"step": 244
},
{
"epoch": 1.51,
"learning_rate": 0.0002082652194412042,
"loss": 1.7524,
"step": 245
},
{
"epoch": 1.52,
"learning_rate": 0.00020757523527195005,
"loss": 1.7785,
"step": 246
},
{
"epoch": 1.53,
"learning_rate": 0.00020688381939793928,
"loss": 1.7901,
"step": 247
},
{
"epoch": 1.53,
"learning_rate": 0.0002061909890123868,
"loss": 1.7941,
"step": 248
},
{
"epoch": 1.54,
"learning_rate": 0.00020549676134368184,
"loss": 1.8157,
"step": 249
},
{
"epoch": 1.54,
"learning_rate": 0.00020480115365495926,
"loss": 1.8354,
"step": 250
},
{
"epoch": 1.55,
"learning_rate": 0.00020410418324367055,
"loss": 1.7764,
"step": 251
},
{
"epoch": 1.56,
"learning_rate": 0.0002034058674411535,
"loss": 1.7677,
"step": 252
},
{
"epoch": 1.56,
"learning_rate": 0.0002027062236122014,
"loss": 1.7739,
"step": 253
},
{
"epoch": 1.57,
"learning_rate": 0.00020200526915463107,
"loss": 1.7746,
"step": 254
},
{
"epoch": 1.58,
"learning_rate": 0.00020130302149885031,
"loss": 1.7675,
"step": 255
},
{
"epoch": 1.58,
"learning_rate": 0.0002005994981074245,
"loss": 1.737,
"step": 256
},
{
"epoch": 1.59,
"learning_rate": 0.0001998947164746423,
"loss": 1.8318,
"step": 257
},
{
"epoch": 1.59,
"learning_rate": 0.00019918869412608066,
"loss": 1.7959,
"step": 258
},
{
"epoch": 1.6,
"learning_rate": 0.00019848144861816898,
"loss": 1.7567,
"step": 259
},
{
"epoch": 1.61,
"learning_rate": 0.00019777299753775265,
"loss": 1.7896,
"step": 260
},
{
"epoch": 1.61,
"learning_rate": 0.0001970633585016556,
"loss": 1.7706,
"step": 261
},
{
"epoch": 1.62,
"learning_rate": 0.0001963525491562421,
"loss": 1.7561,
"step": 262
},
{
"epoch": 1.63,
"learning_rate": 0.00019564058717697847,
"loss": 1.8023,
"step": 263
},
{
"epoch": 1.63,
"learning_rate": 0.00019492749026799288,
"loss": 1.8121,
"step": 264
},
{
"epoch": 1.64,
"learning_rate": 0.00019421327616163563,
"loss": 1.791,
"step": 265
},
{
"epoch": 1.64,
"learning_rate": 0.00019349796261803793,
"loss": 1.7758,
"step": 266
},
{
"epoch": 1.65,
"learning_rate": 0.00019278156742467032,
"loss": 1.755,
"step": 267
},
{
"epoch": 1.66,
"learning_rate": 0.0001920641083959004,
"loss": 1.6986,
"step": 268
},
{
"epoch": 1.66,
"learning_rate": 0.00019134560337254986,
"loss": 1.7569,
"step": 269
},
{
"epoch": 1.67,
"learning_rate": 0.00019062607022145078,
"loss": 1.7849,
"step": 270
},
{
"epoch": 1.68,
"learning_rate": 0.00018990552683500125,
"loss": 1.7833,
"step": 271
},
{
"epoch": 1.68,
"learning_rate": 0.00018918399113072076,
"loss": 1.7739,
"step": 272
},
{
"epoch": 1.69,
"learning_rate": 0.00018846148105080424,
"loss": 1.7765,
"step": 273
},
{
"epoch": 1.69,
"learning_rate": 0.00018773801456167628,
"loss": 1.6825,
"step": 274
},
{
"epoch": 1.7,
"learning_rate": 0.00018701360965354402,
"loss": 1.758,
"step": 275
},
{
"epoch": 1.71,
"learning_rate": 0.00018628828433995013,
"loss": 1.7477,
"step": 276
},
{
"epoch": 1.71,
"learning_rate": 0.00018556205665732462,
"loss": 1.7757,
"step": 277
},
{
"epoch": 1.72,
"learning_rate": 0.00018483494466453636,
"loss": 1.7946,
"step": 278
},
{
"epoch": 1.73,
"learning_rate": 0.0001841069664424442,
"loss": 1.7968,
"step": 279
},
{
"epoch": 1.73,
"learning_rate": 0.00018337814009344714,
"loss": 1.7296,
"step": 280
},
{
"epoch": 1.73,
"eval_loss": 1.8629968166351318,
"eval_runtime": 293.6177,
"eval_samples_per_second": 203.363,
"eval_steps_per_second": 203.363,
"step": 280
},
{
"epoch": 1.74,
"learning_rate": 0.00018264848374103433,
"loss": 1.7516,
"step": 281
},
{
"epoch": 1.74,
"learning_rate": 0.00018191801552933432,
"loss": 1.7466,
"step": 282
},
{
"epoch": 1.75,
"learning_rate": 0.00018118675362266385,
"loss": 1.7304,
"step": 283
},
{
"epoch": 1.76,
"learning_rate": 0.0001804547162050764,
"loss": 1.7519,
"step": 284
},
{
"epoch": 1.76,
"learning_rate": 0.0001797219214799096,
"loss": 1.7721,
"step": 285
},
{
"epoch": 1.77,
"learning_rate": 0.00017898838766933298,
"loss": 1.7455,
"step": 286
},
{
"epoch": 1.78,
"learning_rate": 0.00017825413301389453,
"loss": 1.7674,
"step": 287
},
{
"epoch": 1.78,
"learning_rate": 0.00017751917577206734,
"loss": 1.7581,
"step": 288
},
{
"epoch": 1.79,
"learning_rate": 0.00017678353421979548,
"loss": 1.7714,
"step": 289
},
{
"epoch": 1.79,
"learning_rate": 0.00017604722665003956,
"loss": 1.7696,
"step": 290
},
{
"epoch": 1.8,
"learning_rate": 0.0001753102713723217,
"loss": 1.7903,
"step": 291
},
{
"epoch": 1.81,
"learning_rate": 0.00017457268671227063,
"loss": 1.7563,
"step": 292
},
{
"epoch": 1.81,
"learning_rate": 0.00017383449101116547,
"loss": 1.7828,
"step": 293
},
{
"epoch": 1.82,
"learning_rate": 0.00017309570262548,
"loss": 1.7417,
"step": 294
},
{
"epoch": 1.83,
"learning_rate": 0.00017235633992642615,
"loss": 1.7491,
"step": 295
},
{
"epoch": 1.83,
"learning_rate": 0.000171616421299497,
"loss": 1.7316,
"step": 296
},
{
"epoch": 1.84,
"learning_rate": 0.0001708759651440098,
"loss": 1.785,
"step": 297
},
{
"epoch": 1.84,
"learning_rate": 0.00017013498987264832,
"loss": 1.7413,
"step": 298
},
{
"epoch": 1.85,
"learning_rate": 0.00016939351391100497,
"loss": 1.7874,
"step": 299
},
{
"epoch": 1.86,
"learning_rate": 0.00016865155569712278,
"loss": 1.7377,
"step": 300
},
{
"epoch": 1.86,
"learning_rate": 0.0001679091336810366,
"loss": 1.7814,
"step": 301
},
{
"epoch": 1.87,
"learning_rate": 0.00016716626632431477,
"loss": 1.7343,
"step": 302
},
{
"epoch": 1.88,
"learning_rate": 0.00016642297209959955,
"loss": 1.7549,
"step": 303
},
{
"epoch": 1.88,
"learning_rate": 0.000165679269490148,
"loss": 1.8005,
"step": 304
},
{
"epoch": 1.89,
"learning_rate": 0.0001649351769893725,
"loss": 1.8071,
"step": 305
},
{
"epoch": 1.89,
"learning_rate": 0.00016419071310038057,
"loss": 1.777,
"step": 306
},
{
"epoch": 1.9,
"learning_rate": 0.00016344589633551502,
"loss": 1.7851,
"step": 307
},
{
"epoch": 1.91,
"learning_rate": 0.00016270074521589347,
"loss": 1.7768,
"step": 308
},
{
"epoch": 1.91,
"learning_rate": 0.00016195527827094787,
"loss": 1.7751,
"step": 309
},
{
"epoch": 1.92,
"learning_rate": 0.00016120951403796364,
"loss": 1.7342,
"step": 310
},
{
"epoch": 1.93,
"learning_rate": 0.00016046347106161876,
"loss": 1.7465,
"step": 311
},
{
"epoch": 1.93,
"learning_rate": 0.00015971716789352274,
"loss": 1.7957,
"step": 312
},
{
"epoch": 1.94,
"learning_rate": 0.00015897062309175512,
"loss": 1.785,
"step": 313
},
{
"epoch": 1.94,
"learning_rate": 0.000158223855220404,
"loss": 1.7575,
"step": 314
},
{
"epoch": 1.95,
"learning_rate": 0.00015747688284910457,
"loss": 1.7998,
"step": 315
},
{
"epoch": 1.96,
"learning_rate": 0.00015672972455257723,
"loss": 1.7767,
"step": 316
},
{
"epoch": 1.96,
"learning_rate": 0.00015598239891016574,
"loss": 1.7811,
"step": 317
},
{
"epoch": 1.97,
"learning_rate": 0.00015523492450537517,
"loss": 1.7671,
"step": 318
},
{
"epoch": 1.98,
"learning_rate": 0.00015448731992540976,
"loss": 1.7707,
"step": 319
},
{
"epoch": 1.98,
"learning_rate": 0.00015373960376071093,
"loss": 1.7737,
"step": 320
},
{
"epoch": 1.98,
"eval_loss": 1.8478573560714722,
"eval_runtime": 291.1645,
"eval_samples_per_second": 205.077,
"eval_steps_per_second": 205.077,
"step": 320
},
{
"epoch": 1.99,
"learning_rate": 0.0001529917946044947,
"loss": 1.7395,
"step": 321
},
{
"epoch": 1.99,
"learning_rate": 0.00015224391105228953,
"loss": 1.7596,
"step": 322
},
{
"epoch": 2.0,
"learning_rate": 0.00015149597170147387,
"loss": 1.7475,
"step": 323
},
{
"epoch": 2.01,
"learning_rate": 0.0001507479951508137,
"loss": 1.7828,
"step": 324
},
{
"epoch": 2.01,
"learning_rate": 0.00015,
"loss": 1.7394,
"step": 325
},
{
"epoch": 2.0,
"learning_rate": 0.0001492520048491863,
"loss": 1.6713,
"step": 326
},
{
"epoch": 2.01,
"learning_rate": 0.0001485040282985261,
"loss": 1.5788,
"step": 327
},
{
"epoch": 2.02,
"learning_rate": 0.00014775608894771047,
"loss": 1.6083,
"step": 328
},
{
"epoch": 2.02,
"learning_rate": 0.0001470082053955053,
"loss": 1.5962,
"step": 329
},
{
"epoch": 2.03,
"learning_rate": 0.00014626039623928907,
"loss": 1.564,
"step": 330
},
{
"epoch": 2.04,
"learning_rate": 0.00014551268007459024,
"loss": 1.5529,
"step": 331
},
{
"epoch": 2.04,
"learning_rate": 0.0001447650754946249,
"loss": 1.5699,
"step": 332
},
{
"epoch": 2.05,
"learning_rate": 0.0001440176010898343,
"loss": 1.5667,
"step": 333
},
{
"epoch": 2.05,
"learning_rate": 0.0001432702754474228,
"loss": 1.5275,
"step": 334
},
{
"epoch": 2.06,
"learning_rate": 0.0001425231171508954,
"loss": 1.5585,
"step": 335
},
{
"epoch": 2.07,
"learning_rate": 0.00014177614477959595,
"loss": 1.5753,
"step": 336
},
{
"epoch": 2.07,
"learning_rate": 0.00014102937690824486,
"loss": 1.5479,
"step": 337
},
{
"epoch": 2.08,
"learning_rate": 0.00014028283210647718,
"loss": 1.5721,
"step": 338
},
{
"epoch": 2.09,
"learning_rate": 0.00013953652893838119,
"loss": 1.5526,
"step": 339
},
{
"epoch": 2.09,
"learning_rate": 0.00013879048596203636,
"loss": 1.5777,
"step": 340
},
{
"epoch": 2.1,
"learning_rate": 0.00013804472172905213,
"loss": 1.5216,
"step": 341
},
{
"epoch": 2.1,
"learning_rate": 0.0001372992547841065,
"loss": 1.5776,
"step": 342
},
{
"epoch": 2.11,
"learning_rate": 0.00013655410366448498,
"loss": 1.5717,
"step": 343
},
{
"epoch": 2.12,
"learning_rate": 0.00013580928689961943,
"loss": 1.5628,
"step": 344
},
{
"epoch": 2.12,
"learning_rate": 0.0001350648230106275,
"loss": 1.5333,
"step": 345
},
{
"epoch": 2.13,
"learning_rate": 0.000134320730509852,
"loss": 1.5271,
"step": 346
},
{
"epoch": 2.14,
"learning_rate": 0.00013357702790040048,
"loss": 1.5672,
"step": 347
},
{
"epoch": 2.14,
"learning_rate": 0.00013283373367568523,
"loss": 1.5619,
"step": 348
},
{
"epoch": 2.15,
"learning_rate": 0.00013209086631896336,
"loss": 1.5461,
"step": 349
},
{
"epoch": 2.15,
"learning_rate": 0.00013134844430287725,
"loss": 1.5724,
"step": 350
},
{
"epoch": 2.16,
"learning_rate": 0.00013060648608899503,
"loss": 1.5292,
"step": 351
},
{
"epoch": 2.17,
"learning_rate": 0.0001298650101273517,
"loss": 1.5703,
"step": 352
},
{
"epoch": 2.17,
"learning_rate": 0.0001291240348559902,
"loss": 1.5444,
"step": 353
},
{
"epoch": 2.18,
"learning_rate": 0.000128383578700503,
"loss": 1.5848,
"step": 354
},
{
"epoch": 2.19,
"learning_rate": 0.0001276436600735738,
"loss": 1.603,
"step": 355
},
{
"epoch": 2.19,
"learning_rate": 0.00012690429737451992,
"loss": 1.5961,
"step": 356
},
{
"epoch": 2.2,
"learning_rate": 0.0001261655089888345,
"loss": 1.5635,
"step": 357
},
{
"epoch": 2.2,
"learning_rate": 0.00012542731328772934,
"loss": 1.5743,
"step": 358
},
{
"epoch": 2.21,
"learning_rate": 0.00012468972862767825,
"loss": 1.5291,
"step": 359
},
{
"epoch": 2.22,
"learning_rate": 0.00012395277334996044,
"loss": 1.5871,
"step": 360
},
{
"epoch": 2.22,
"eval_loss": 1.8882598876953125,
"eval_runtime": 292.5066,
"eval_samples_per_second": 204.136,
"eval_steps_per_second": 204.136,
"step": 360
},
{
"epoch": 2.22,
"learning_rate": 0.0001232164657802045,
"loss": 1.5044,
"step": 361
},
{
"epoch": 2.23,
"learning_rate": 0.00012248082422793266,
"loss": 1.5682,
"step": 362
},
{
"epoch": 2.24,
"learning_rate": 0.00012174586698610547,
"loss": 1.564,
"step": 363
},
{
"epoch": 2.24,
"learning_rate": 0.00012101161233066703,
"loss": 1.5595,
"step": 364
},
{
"epoch": 2.25,
"learning_rate": 0.00012027807852009038,
"loss": 1.5228,
"step": 365
},
{
"epoch": 2.25,
"learning_rate": 0.00011954528379492359,
"loss": 1.5444,
"step": 366
},
{
"epoch": 2.26,
"learning_rate": 0.00011881324637733611,
"loss": 1.5731,
"step": 367
},
{
"epoch": 2.27,
"learning_rate": 0.0001180819844706657,
"loss": 1.5853,
"step": 368
},
{
"epoch": 2.27,
"learning_rate": 0.00011735151625896565,
"loss": 1.583,
"step": 369
},
{
"epoch": 2.28,
"learning_rate": 0.00011662185990655284,
"loss": 1.5859,
"step": 370
},
{
"epoch": 2.29,
"learning_rate": 0.00011589303355755579,
"loss": 1.5597,
"step": 371
},
{
"epoch": 2.29,
"learning_rate": 0.00011516505533546363,
"loss": 1.5692,
"step": 372
},
{
"epoch": 2.3,
"learning_rate": 0.00011443794334267538,
"loss": 1.5628,
"step": 373
},
{
"epoch": 2.3,
"learning_rate": 0.00011371171566004985,
"loss": 1.5875,
"step": 374
},
{
"epoch": 2.31,
"learning_rate": 0.00011298639034645593,
"loss": 1.545,
"step": 375
},
{
"epoch": 2.32,
"learning_rate": 0.00011226198543832372,
"loss": 1.5687,
"step": 376
},
{
"epoch": 2.32,
"learning_rate": 0.00011153851894919574,
"loss": 1.5379,
"step": 377
},
{
"epoch": 2.33,
"learning_rate": 0.00011081600886927924,
"loss": 1.5287,
"step": 378
},
{
"epoch": 2.34,
"learning_rate": 0.00011009447316499873,
"loss": 1.5763,
"step": 379
},
{
"epoch": 2.34,
"learning_rate": 0.00010937392977854923,
"loss": 1.5675,
"step": 380
},
{
"epoch": 2.35,
"learning_rate": 0.00010865439662745013,
"loss": 1.5487,
"step": 381
},
{
"epoch": 2.35,
"learning_rate": 0.0001079358916040996,
"loss": 1.5471,
"step": 382
},
{
"epoch": 2.36,
"learning_rate": 0.00010721843257532968,
"loss": 1.5631,
"step": 383
},
{
"epoch": 2.37,
"learning_rate": 0.00010650203738196206,
"loss": 1.5721,
"step": 384
},
{
"epoch": 2.37,
"learning_rate": 0.00010578672383836435,
"loss": 1.5811,
"step": 385
},
{
"epoch": 2.38,
"learning_rate": 0.0001050725097320071,
"loss": 1.5824,
"step": 386
},
{
"epoch": 2.39,
"learning_rate": 0.00010435941282302154,
"loss": 1.5706,
"step": 387
},
{
"epoch": 2.39,
"learning_rate": 0.0001036474508437579,
"loss": 1.5383,
"step": 388
},
{
"epoch": 2.4,
"learning_rate": 0.00010293664149834444,
"loss": 1.5719,
"step": 389
},
{
"epoch": 2.4,
"learning_rate": 0.00010222700246224735,
"loss": 1.5167,
"step": 390
},
{
"epoch": 2.41,
"learning_rate": 0.00010151855138183102,
"loss": 1.5552,
"step": 391
},
{
"epoch": 2.42,
"learning_rate": 0.00010081130587391934,
"loss": 1.5735,
"step": 392
},
{
"epoch": 2.42,
"learning_rate": 0.00010010528352535771,
"loss": 1.5604,
"step": 393
},
{
"epoch": 2.43,
"learning_rate": 9.94005018925755e-05,
"loss": 1.5332,
"step": 394
},
{
"epoch": 2.43,
"learning_rate": 9.869697850114969e-05,
"loss": 1.5617,
"step": 395
},
{
"epoch": 2.44,
"learning_rate": 9.799473084536891e-05,
"loss": 1.5573,
"step": 396
},
{
"epoch": 2.45,
"learning_rate": 9.729377638779857e-05,
"loss": 1.5541,
"step": 397
},
{
"epoch": 2.45,
"learning_rate": 9.659413255884647e-05,
"loss": 1.5915,
"step": 398
},
{
"epoch": 2.46,
"learning_rate": 9.589581675632944e-05,
"loss": 1.5829,
"step": 399
},
{
"epoch": 2.47,
"learning_rate": 9.519884634504074e-05,
"loss": 1.5339,
"step": 400
},
{
"epoch": 2.47,
"eval_loss": 1.8761346340179443,
"eval_runtime": 290.7874,
"eval_samples_per_second": 205.342,
"eval_steps_per_second": 205.342,
"step": 400
},
{
"epoch": 2.47,
"learning_rate": 9.450323865631816e-05,
"loss": 1.5532,
"step": 401
},
{
"epoch": 2.48,
"learning_rate": 9.380901098761319e-05,
"loss": 1.5511,
"step": 402
},
{
"epoch": 2.48,
"learning_rate": 9.311618060206074e-05,
"loss": 1.5456,
"step": 403
},
{
"epoch": 2.49,
"learning_rate": 9.242476472804995e-05,
"loss": 1.5488,
"step": 404
},
{
"epoch": 2.5,
"learning_rate": 9.17347805587958e-05,
"loss": 1.5898,
"step": 405
},
{
"epoch": 2.5,
"learning_rate": 9.104624525191145e-05,
"loss": 1.5482,
"step": 406
},
{
"epoch": 2.51,
"learning_rate": 9.035917592898177e-05,
"loss": 1.5348,
"step": 407
},
{
"epoch": 2.52,
"learning_rate": 8.967358967513738e-05,
"loss": 1.5588,
"step": 408
},
{
"epoch": 2.52,
"learning_rate": 8.898950353862998e-05,
"loss": 1.5553,
"step": 409
},
{
"epoch": 2.53,
"learning_rate": 8.830693453040829e-05,
"loss": 1.5642,
"step": 410
},
{
"epoch": 2.53,
"learning_rate": 8.762589962369511e-05,
"loss": 1.5557,
"step": 411
},
{
"epoch": 2.54,
"learning_rate": 8.694641575356519e-05,
"loss": 1.5578,
"step": 412
},
{
"epoch": 2.55,
"learning_rate": 8.626849981652424e-05,
"loss": 1.574,
"step": 413
},
{
"epoch": 2.55,
"learning_rate": 8.55921686700886e-05,
"loss": 1.5666,
"step": 414
},
{
"epoch": 2.56,
"learning_rate": 8.491743913236628e-05,
"loss": 1.5562,
"step": 415
},
{
"epoch": 2.57,
"learning_rate": 8.424432798163836e-05,
"loss": 1.5529,
"step": 416
},
{
"epoch": 2.57,
"learning_rate": 8.357285195594228e-05,
"loss": 1.538,
"step": 417
},
{
"epoch": 2.58,
"learning_rate": 8.290302775265509e-05,
"loss": 1.5782,
"step": 418
},
{
"epoch": 2.58,
"learning_rate": 8.223487202807877e-05,
"loss": 1.55,
"step": 419
},
{
"epoch": 2.59,
"learning_rate": 8.156840139702554e-05,
"loss": 1.5439,
"step": 420
},
{
"epoch": 2.6,
"learning_rate": 8.090363243240517e-05,
"loss": 1.5627,
"step": 421
},
{
"epoch": 2.6,
"learning_rate": 8.024058166481243e-05,
"loss": 1.5693,
"step": 422
},
{
"epoch": 2.61,
"learning_rate": 7.957926558211642e-05,
"loss": 1.5816,
"step": 423
},
{
"epoch": 2.62,
"learning_rate": 7.89197006290502e-05,
"loss": 1.5464,
"step": 424
},
{
"epoch": 2.62,
"learning_rate": 7.82619032068023e-05,
"loss": 1.5494,
"step": 425
},
{
"epoch": 2.63,
"learning_rate": 7.760588967260838e-05,
"loss": 1.5711,
"step": 426
},
{
"epoch": 2.63,
"learning_rate": 7.69516763393449e-05,
"loss": 1.5801,
"step": 427
},
{
"epoch": 2.64,
"learning_rate": 7.629927947512331e-05,
"loss": 1.5628,
"step": 428
},
{
"epoch": 2.65,
"learning_rate": 7.564871530288536e-05,
"loss": 1.5601,
"step": 429
},
{
"epoch": 2.65,
"learning_rate": 7.500000000000002e-05,
"loss": 1.5453,
"step": 430
},
{
"epoch": 2.66,
"learning_rate": 7.435314969786088e-05,
"loss": 1.5684,
"step": 431
},
{
"epoch": 2.67,
"learning_rate": 7.370818048148527e-05,
"loss": 1.5656,
"step": 432
},
{
"epoch": 2.67,
"learning_rate": 7.30651083891141e-05,
"loss": 1.5735,
"step": 433
},
{
"epoch": 2.68,
"learning_rate": 7.242394941181308e-05,
"loss": 1.5587,
"step": 434
},
{
"epoch": 2.68,
"learning_rate": 7.17847194930753e-05,
"loss": 1.5735,
"step": 435
},
{
"epoch": 2.69,
"learning_rate": 7.114743452842427e-05,
"loss": 1.5656,
"step": 436
},
{
"epoch": 2.7,
"learning_rate": 7.051211036501928e-05,
"loss": 1.5228,
"step": 437
},
{
"epoch": 2.7,
"learning_rate": 6.987876280126068e-05,
"loss": 1.5452,
"step": 438
},
{
"epoch": 2.71,
"learning_rate": 6.924740758639768e-05,
"loss": 1.5777,
"step": 439
},
{
"epoch": 2.72,
"learning_rate": 6.86180604201361e-05,
"loss": 1.5589,
"step": 440
},
{
"epoch": 2.72,
"eval_loss": 1.8656864166259766,
"eval_runtime": 292.9263,
"eval_samples_per_second": 203.843,
"eval_steps_per_second": 203.843,
"step": 440
},
{
"epoch": 2.72,
"learning_rate": 6.799073695224846e-05,
"loss": 1.562,
"step": 441
},
{
"epoch": 2.73,
"learning_rate": 6.736545278218463e-05,
"loss": 1.5788,
"step": 442
},
{
"epoch": 2.73,
"learning_rate": 6.674222345868376e-05,
"loss": 1.5532,
"step": 443
},
{
"epoch": 2.74,
"learning_rate": 6.612106447938799e-05,
"loss": 1.5986,
"step": 444
},
{
"epoch": 2.75,
"learning_rate": 6.550199129045668e-05,
"loss": 1.573,
"step": 445
},
{
"epoch": 2.75,
"learning_rate": 6.488501928618274e-05,
"loss": 1.5544,
"step": 446
},
{
"epoch": 2.76,
"learning_rate": 6.427016380860937e-05,
"loss": 1.5651,
"step": 447
},
{
"epoch": 2.77,
"learning_rate": 6.365744014714898e-05,
"loss": 1.5261,
"step": 448
},
{
"epoch": 2.77,
"learning_rate": 6.304686353820266e-05,
"loss": 1.5325,
"step": 449
},
{
"epoch": 2.78,
"learning_rate": 6.243844916478155e-05,
"loss": 1.5435,
"step": 450
},
{
"epoch": 2.78,
"learning_rate": 6.183221215612904e-05,
"loss": 1.546,
"step": 451
},
{
"epoch": 2.79,
"learning_rate": 6.122816758734487e-05,
"loss": 1.5508,
"step": 452
},
{
"epoch": 2.8,
"learning_rate": 6.0626330479009845e-05,
"loss": 1.5678,
"step": 453
},
{
"epoch": 2.8,
"learning_rate": 6.002671579681294e-05,
"loss": 1.523,
"step": 454
},
{
"epoch": 2.81,
"learning_rate": 5.9429338451178355e-05,
"loss": 1.5271,
"step": 455
},
{
"epoch": 2.82,
"learning_rate": 5.88342132968955e-05,
"loss": 1.5285,
"step": 456
},
{
"epoch": 2.82,
"learning_rate": 5.824135513274902e-05,
"loss": 1.5476,
"step": 457
},
{
"epoch": 2.83,
"learning_rate": 5.765077870115125e-05,
"loss": 1.5216,
"step": 458
},
{
"epoch": 2.83,
"learning_rate": 5.706249868777526e-05,
"loss": 1.547,
"step": 459
},
{
"epoch": 2.84,
"learning_rate": 5.6476529721189974e-05,
"loss": 1.5398,
"step": 460
},
{
"epoch": 2.85,
"learning_rate": 5.589288637249612e-05,
"loss": 1.579,
"step": 461
},
{
"epoch": 2.85,
"learning_rate": 5.531158315496417e-05,
"loss": 1.4801,
"step": 462
},
{
"epoch": 2.86,
"learning_rate": 5.473263452367318e-05,
"loss": 1.5465,
"step": 463
},
{
"epoch": 2.87,
"learning_rate": 5.415605487515164e-05,
"loss": 1.5303,
"step": 464
},
{
"epoch": 2.87,
"learning_rate": 5.358185854701909e-05,
"loss": 1.5519,
"step": 465
},
{
"epoch": 2.88,
"learning_rate": 5.3010059817630066e-05,
"loss": 1.5818,
"step": 466
},
{
"epoch": 2.88,
"learning_rate": 5.244067290571856e-05,
"loss": 1.5413,
"step": 467
},
{
"epoch": 2.89,
"learning_rate": 5.187371197004485e-05,
"loss": 1.5372,
"step": 468
},
{
"epoch": 2.9,
"learning_rate": 5.130919110904311e-05,
"loss": 1.5459,
"step": 469
},
{
"epoch": 2.9,
"learning_rate": 5.074712436047112e-05,
"loss": 1.5488,
"step": 470
},
{
"epoch": 2.91,
"learning_rate": 5.018752570106086e-05,
"loss": 1.5332,
"step": 471
},
{
"epoch": 2.92,
"learning_rate": 4.963040904617131e-05,
"loss": 1.5684,
"step": 472
},
{
"epoch": 2.92,
"learning_rate": 4.9075788249442024e-05,
"loss": 1.5405,
"step": 473
},
{
"epoch": 2.93,
"learning_rate": 4.852367710244921e-05,
"loss": 1.5361,
"step": 474
},
{
"epoch": 2.93,
"learning_rate": 4.7974089334362057e-05,
"loss": 1.5219,
"step": 475
},
{
"epoch": 2.94,
"learning_rate": 4.742703861160198e-05,
"loss": 1.5518,
"step": 476
},
{
"epoch": 2.95,
"learning_rate": 4.688253853750227e-05,
"loss": 1.5699,
"step": 477
},
{
"epoch": 2.95,
"learning_rate": 4.63406026519703e-05,
"loss": 1.5368,
"step": 478
},
{
"epoch": 2.96,
"learning_rate": 4.5801244431150394e-05,
"loss": 1.5286,
"step": 479
},
{
"epoch": 2.96,
"learning_rate": 4.526447728708908e-05,
"loss": 1.5651,
"step": 480
},
{
"epoch": 2.96,
"eval_loss": 1.8590147495269775,
"eval_runtime": 291.2621,
"eval_samples_per_second": 205.008,
"eval_steps_per_second": 205.008,
"step": 480
},
{
"epoch": 2.97,
"learning_rate": 4.4730314567401275e-05,
"loss": 1.5242,
"step": 481
},
{
"epoch": 2.98,
"learning_rate": 4.419876955493869e-05,
"loss": 1.5424,
"step": 482
},
{
"epoch": 2.98,
"learning_rate": 4.3669855467459144e-05,
"loss": 1.5329,
"step": 483
},
{
"epoch": 2.99,
"learning_rate": 4.31435854572983e-05,
"loss": 1.5417,
"step": 484
},
{
"epoch": 3.0,
"learning_rate": 4.261997261104223e-05,
"loss": 1.5558,
"step": 485
},
{
"epoch": 3.0,
"learning_rate": 4.209902994920235e-05,
"loss": 1.5102,
"step": 486
},
{
"epoch": 3.01,
"learning_rate": 4.158077042589128e-05,
"loss": 1.5143,
"step": 487
},
{
"epoch": 3.0,
"learning_rate": 4.1065206928501055e-05,
"loss": 1.5588,
"step": 488
},
{
"epoch": 3.01,
"learning_rate": 4.055235227738237e-05,
"loss": 1.375,
"step": 489
},
{
"epoch": 3.01,
"learning_rate": 4.004221922552608e-05,
"loss": 1.3812,
"step": 490
},
{
"epoch": 3.02,
"learning_rate": 3.953482045824573e-05,
"loss": 1.3902,
"step": 491
},
{
"epoch": 3.03,
"learning_rate": 3.903016859286243e-05,
"loss": 1.3749,
"step": 492
},
{
"epoch": 3.03,
"learning_rate": 3.852827617839084e-05,
"loss": 1.3684,
"step": 493
},
{
"epoch": 3.04,
"learning_rate": 3.8029155695227474e-05,
"loss": 1.383,
"step": 494
},
{
"epoch": 3.04,
"learning_rate": 3.753281955483985e-05,
"loss": 1.3497,
"step": 495
},
{
"epoch": 3.05,
"learning_rate": 3.7039280099458366e-05,
"loss": 1.3742,
"step": 496
},
{
"epoch": 3.06,
"learning_rate": 3.654854960176895e-05,
"loss": 1.3838,
"step": 497
},
{
"epoch": 3.06,
"learning_rate": 3.60606402646083e-05,
"loss": 1.3758,
"step": 498
},
{
"epoch": 3.07,
"learning_rate": 3.557556422066002e-05,
"loss": 1.365,
"step": 499
},
{
"epoch": 3.08,
"learning_rate": 3.509333353215331e-05,
"loss": 1.3621,
"step": 500
},
{
"epoch": 3.08,
"learning_rate": 3.4613960190562696e-05,
"loss": 1.3515,
"step": 501
},
{
"epoch": 3.09,
"learning_rate": 3.413745611631009e-05,
"loss": 1.3561,
"step": 502
},
{
"epoch": 3.09,
"learning_rate": 3.366383315846815e-05,
"loss": 1.3699,
"step": 503
},
{
"epoch": 3.1,
"learning_rate": 3.3193103094465906e-05,
"loss": 1.3388,
"step": 504
},
{
"epoch": 3.11,
"learning_rate": 3.2725277629795526e-05,
"loss": 1.3341,
"step": 505
},
{
"epoch": 3.11,
"learning_rate": 3.226036839772165e-05,
"loss": 1.3279,
"step": 506
},
{
"epoch": 3.12,
"learning_rate": 3.1798386958991714e-05,
"loss": 1.374,
"step": 507
},
{
"epoch": 3.13,
"learning_rate": 3.133934480154885e-05,
"loss": 1.3762,
"step": 508
},
{
"epoch": 3.13,
"learning_rate": 3.0883253340245845e-05,
"loss": 1.3613,
"step": 509
},
{
"epoch": 3.14,
"learning_rate": 3.0430123916561672e-05,
"loss": 1.3593,
"step": 510
},
{
"epoch": 3.14,
"learning_rate": 2.997996779831907e-05,
"loss": 1.3717,
"step": 511
},
{
"epoch": 3.15,
"learning_rate": 2.953279617940478e-05,
"loss": 1.3379,
"step": 512
},
{
"epoch": 3.16,
"learning_rate": 2.9088620179490675e-05,
"loss": 1.3429,
"step": 513
},
{
"epoch": 3.16,
"learning_rate": 2.8647450843757897e-05,
"loss": 1.3596,
"step": 514
},
{
"epoch": 3.17,
"learning_rate": 2.8209299142621522e-05,
"loss": 1.3652,
"step": 515
},
{
"epoch": 3.18,
"learning_rate": 2.7774175971458283e-05,
"loss": 1.3752,
"step": 516
},
{
"epoch": 3.18,
"learning_rate": 2.7342092150335292e-05,
"loss": 1.3611,
"step": 517
},
{
"epoch": 3.19,
"learning_rate": 2.691305842374128e-05,
"loss": 1.3731,
"step": 518
},
{
"epoch": 3.19,
"learning_rate": 2.648708546031911e-05,
"loss": 1.3582,
"step": 519
},
{
"epoch": 3.2,
"learning_rate": 2.6064183852600797e-05,
"loss": 1.3134,
"step": 520
},
{
"epoch": 3.2,
"eval_loss": 1.949735164642334,
"eval_runtime": 295.0075,
"eval_samples_per_second": 202.405,
"eval_steps_per_second": 202.405,
"step": 520
},
{
"epoch": 3.21,
"learning_rate": 2.5644364116743755e-05,
"loss": 1.38,
"step": 521
},
{
"epoch": 3.21,
"learning_rate": 2.5227636692269688e-05,
"loss": 1.3663,
"step": 522
},
{
"epoch": 3.22,
"learning_rate": 2.48140119418046e-05,
"loss": 1.3502,
"step": 523
},
{
"epoch": 3.23,
"learning_rate": 2.440350015082152e-05,
"loss": 1.3493,
"step": 524
},
{
"epoch": 3.23,
"learning_rate": 2.3996111527384288e-05,
"loss": 1.3288,
"step": 525
},
{
"epoch": 3.24,
"learning_rate": 2.359185620189412e-05,
"loss": 1.3119,
"step": 526
},
{
"epoch": 3.24,
"learning_rate": 2.31907442268375e-05,
"loss": 1.3279,
"step": 527
},
{
"epoch": 3.25,
"learning_rate": 2.2792785576536105e-05,
"loss": 1.332,
"step": 528
},
{
"epoch": 3.26,
"learning_rate": 2.239799014689908e-05,
"loss": 1.357,
"step": 529
},
{
"epoch": 3.26,
"learning_rate": 2.2006367755176655e-05,
"loss": 1.3447,
"step": 530
},
{
"epoch": 3.27,
"learning_rate": 2.1617928139716247e-05,
"loss": 1.3639,
"step": 531
},
{
"epoch": 3.28,
"learning_rate": 2.1232680959720082e-05,
"loss": 1.33,
"step": 532
},
{
"epoch": 3.28,
"learning_rate": 2.0850635795005262e-05,
"loss": 1.3419,
"step": 533
},
{
"epoch": 3.29,
"learning_rate": 2.0471802145765376e-05,
"loss": 1.3326,
"step": 534
},
{
"epoch": 3.29,
"learning_rate": 2.009618943233419e-05,
"loss": 1.347,
"step": 535
},
{
"epoch": 3.3,
"learning_rate": 1.9723806994951675e-05,
"loss": 1.3543,
"step": 536
},
{
"epoch": 3.31,
"learning_rate": 1.935466409353139e-05,
"loss": 1.3428,
"step": 537
},
{
"epoch": 3.31,
"learning_rate": 1.8988769907430552e-05,
"loss": 1.3416,
"step": 538
},
{
"epoch": 3.32,
"learning_rate": 1.8626133535221517e-05,
"loss": 1.3571,
"step": 539
},
{
"epoch": 3.33,
"learning_rate": 1.82667639944657e-05,
"loss": 1.3599,
"step": 540
},
{
"epoch": 3.33,
"learning_rate": 1.79106702214893e-05,
"loss": 1.3225,
"step": 541
},
{
"epoch": 3.34,
"learning_rate": 1.755786107116095e-05,
"loss": 1.3576,
"step": 542
},
{
"epoch": 3.34,
"learning_rate": 1.7208345316671747e-05,
"loss": 1.3816,
"step": 543
},
{
"epoch": 3.35,
"learning_rate": 1.6862131649316884e-05,
"loss": 1.3527,
"step": 544
},
{
"epoch": 3.36,
"learning_rate": 1.6519228678279718e-05,
"loss": 1.3533,
"step": 545
},
{
"epoch": 3.36,
"learning_rate": 1.6179644930417497e-05,
"loss": 1.348,
"step": 546
},
{
"epoch": 3.37,
"learning_rate": 1.5843388850049498e-05,
"loss": 1.3487,
"step": 547
},
{
"epoch": 3.37,
"learning_rate": 1.5510468798746878e-05,
"loss": 1.3597,
"step": 548
},
{
"epoch": 3.38,
"learning_rate": 1.5180893055124977e-05,
"loss": 1.3334,
"step": 549
},
{
"epoch": 3.39,
"learning_rate": 1.4854669814637143e-05,
"loss": 1.3969,
"step": 550
},
{
"epoch": 3.39,
"learning_rate": 1.4531807189371264e-05,
"loss": 1.3682,
"step": 551
},
{
"epoch": 3.4,
"learning_rate": 1.4212313207847786e-05,
"loss": 1.3385,
"step": 552
},
{
"epoch": 3.41,
"learning_rate": 1.3896195814820266e-05,
"loss": 1.3322,
"step": 553
},
{
"epoch": 3.41,
"learning_rate": 1.3583462871077672e-05,
"loss": 1.3438,
"step": 554
},
{
"epoch": 3.42,
"learning_rate": 1.3274122153249028e-05,
"loss": 1.3818,
"step": 555
},
{
"epoch": 3.42,
"learning_rate": 1.2968181353609852e-05,
"loss": 1.3426,
"step": 556
},
{
"epoch": 3.43,
"learning_rate": 1.2665648079891139e-05,
"loss": 1.3623,
"step": 557
},
{
"epoch": 3.44,
"learning_rate": 1.2366529855089913e-05,
"loss": 1.3458,
"step": 558
},
{
"epoch": 3.44,
"learning_rate": 1.2070834117282412e-05,
"loss": 1.3632,
"step": 559
},
{
"epoch": 3.45,
"learning_rate": 1.1778568219438839e-05,
"loss": 1.3423,
"step": 560
},
{
"epoch": 3.45,
"eval_loss": 1.9406418800354004,
"eval_runtime": 291.1096,
"eval_samples_per_second": 205.115,
"eval_steps_per_second": 205.115,
"step": 560
},
{
"epoch": 3.46,
"learning_rate": 1.1489739429240845e-05,
"loss": 1.3542,
"step": 561
},
{
"epoch": 3.46,
"learning_rate": 1.1204354928900494e-05,
"loss": 1.392,
"step": 562
},
{
"epoch": 3.47,
"learning_rate": 1.0922421814981901e-05,
"loss": 1.3587,
"step": 563
},
{
"epoch": 3.47,
"learning_rate": 1.0643947098224609e-05,
"loss": 1.3266,
"step": 564
},
{
"epoch": 3.48,
"learning_rate": 1.036893770336938e-05,
"loss": 1.3765,
"step": 565
},
{
"epoch": 3.49,
"learning_rate": 1.0097400468985855e-05,
"loss": 1.3446,
"step": 566
},
{
"epoch": 3.49,
"learning_rate": 9.829342147302683e-06,
"loss": 1.3355,
"step": 567
},
{
"epoch": 3.5,
"learning_rate": 9.564769404039419e-06,
"loss": 1.3728,
"step": 568
},
{
"epoch": 3.51,
"learning_rate": 9.303688818240957e-06,
"loss": 1.3629,
"step": 569
},
{
"epoch": 3.51,
"learning_rate": 9.046106882113751e-06,
"loss": 1.3474,
"step": 570
},
{
"epoch": 3.52,
"learning_rate": 8.792030000864536e-06,
"loss": 1.3797,
"step": 571
},
{
"epoch": 3.52,
"learning_rate": 8.541464492540912e-06,
"loss": 1.3478,
"step": 572
},
{
"epoch": 3.53,
"learning_rate": 8.294416587874425e-06,
"loss": 1.3632,
"step": 573
},
{
"epoch": 3.54,
"learning_rate": 8.050892430125361e-06,
"loss": 1.3596,
"step": 574
},
{
"epoch": 3.54,
"learning_rate": 7.810898074930243e-06,
"loss": 1.3701,
"step": 575
},
{
"epoch": 3.55,
"learning_rate": 7.574439490151046e-06,
"loss": 1.363,
"step": 576
},
{
"epoch": 3.56,
"learning_rate": 7.34152255572697e-06,
"loss": 1.3881,
"step": 577
},
{
"epoch": 3.56,
"learning_rate": 7.112153063528064e-06,
"loss": 1.3419,
"step": 578
},
{
"epoch": 3.57,
"learning_rate": 6.886336717211361e-06,
"loss": 1.3458,
"step": 579
},
{
"epoch": 3.57,
"learning_rate": 6.664079132078881e-06,
"loss": 1.3603,
"step": 580
},
{
"epoch": 3.58,
"learning_rate": 6.4453858349381544e-06,
"loss": 1.3583,
"step": 581
},
{
"epoch": 3.59,
"learning_rate": 6.230262263964641e-06,
"loss": 1.3598,
"step": 582
},
{
"epoch": 3.59,
"learning_rate": 6.018713768566658e-06,
"loss": 1.3314,
"step": 583
},
{
"epoch": 3.6,
"learning_rate": 5.810745609252165e-06,
"loss": 1.3479,
"step": 584
},
{
"epoch": 3.61,
"learning_rate": 5.606362957498195e-06,
"loss": 1.3465,
"step": 585
},
{
"epoch": 3.61,
"learning_rate": 5.405570895622013e-06,
"loss": 1.3364,
"step": 586
},
{
"epoch": 3.62,
"learning_rate": 5.208374416654909e-06,
"loss": 1.3697,
"step": 587
},
{
"epoch": 3.62,
"learning_rate": 5.014778424217924e-06,
"loss": 1.3575,
"step": 588
},
{
"epoch": 3.63,
"learning_rate": 4.8247877324000375e-06,
"loss": 1.3657,
"step": 589
},
{
"epoch": 3.64,
"learning_rate": 4.638407065638322e-06,
"loss": 1.3613,
"step": 590
},
{
"epoch": 3.64,
"learning_rate": 4.455641058600528e-06,
"loss": 1.356,
"step": 591
},
{
"epoch": 3.65,
"learning_rate": 4.276494256069873e-06,
"loss": 1.3337,
"step": 592
},
{
"epoch": 3.66,
"learning_rate": 4.10097111283198e-06,
"loss": 1.3734,
"step": 593
},
{
"epoch": 3.66,
"learning_rate": 3.929075993564052e-06,
"loss": 1.3329,
"step": 594
},
{
"epoch": 3.67,
"learning_rate": 3.760813172726457e-06,
"loss": 1.3533,
"step": 595
},
{
"epoch": 3.67,
"learning_rate": 3.5961868344563325e-06,
"loss": 1.3683,
"step": 596
},
{
"epoch": 3.68,
"learning_rate": 3.435201072463617e-06,
"loss": 1.3476,
"step": 597
},
{
"epoch": 3.69,
"learning_rate": 3.2778598899291465e-06,
"loss": 1.3682,
"step": 598
},
{
"epoch": 3.69,
"learning_rate": 3.1241671994052864e-06,
"loss": 1.3557,
"step": 599
},
{
"epoch": 3.7,
"learning_rate": 2.9741268227184255e-06,
"loss": 1.3635,
"step": 600
},
{
"epoch": 3.7,
"eval_loss": 1.9362093210220337,
"eval_runtime": 291.4007,
"eval_samples_per_second": 204.91,
"eval_steps_per_second": 204.91,
"step": 600
},
{
"epoch": 3.71,
"learning_rate": 2.8277424908741188e-06,
"loss": 1.3398,
"step": 601
},
{
"epoch": 3.71,
"learning_rate": 2.685017843964177e-06,
"loss": 1.3149,
"step": 602
},
{
"epoch": 3.72,
"learning_rate": 2.5459564310762735e-06,
"loss": 1.3404,
"step": 603
},
{
"epoch": 3.72,
"learning_rate": 2.4105617102055496e-06,
"loss": 1.3221,
"step": 604
},
{
"epoch": 3.73,
"learning_rate": 2.2788370481687965e-06,
"loss": 1.3568,
"step": 605
},
{
"epoch": 3.74,
"learning_rate": 2.150785720520559e-06,
"loss": 1.3547,
"step": 606
},
{
"epoch": 3.74,
"learning_rate": 2.0264109114717987e-06,
"loss": 1.3684,
"step": 607
},
{
"epoch": 3.75,
"learning_rate": 1.905715713810657e-06,
"loss": 1.3559,
"step": 608
},
{
"epoch": 3.76,
"learning_rate": 1.7887031288256026e-06,
"loss": 1.3349,
"step": 609
},
{
"epoch": 3.76,
"learning_rate": 1.6753760662307215e-06,
"loss": 1.3337,
"step": 610
},
{
"epoch": 3.77,
"learning_rate": 1.5657373440934595e-06,
"loss": 1.336,
"step": 611
},
{
"epoch": 3.77,
"learning_rate": 1.4597896887644456e-06,
"loss": 1.3671,
"step": 612
},
{
"epoch": 3.78,
"learning_rate": 1.3575357348097948e-06,
"loss": 1.3377,
"step": 613
},
{
"epoch": 3.79,
"learning_rate": 1.2589780249454618e-06,
"loss": 1.3552,
"step": 614
},
{
"epoch": 3.79,
"learning_rate": 1.1641190099741904e-06,
"loss": 1.3821,
"step": 615
},
{
"epoch": 3.8,
"learning_rate": 1.0729610487243966e-06,
"loss": 1.3359,
"step": 616
},
{
"epoch": 3.81,
"learning_rate": 9.855064079916653e-07,
"loss": 1.3513,
"step": 617
},
{
"epoch": 3.81,
"learning_rate": 9.017572624822112e-07,
"loss": 1.372,
"step": 618
},
{
"epoch": 3.82,
"learning_rate": 8.217156947590064e-07,
"loss": 1.3418,
"step": 619
},
{
"epoch": 3.82,
"learning_rate": 7.453836951897885e-07,
"loss": 1.3239,
"step": 620
},
{
"epoch": 3.83,
"learning_rate": 6.727631618977325e-07,
"loss": 1.3551,
"step": 621
},
{
"epoch": 3.84,
"learning_rate": 6.038559007141397e-07,
"loss": 1.3454,
"step": 622
},
{
"epoch": 3.84,
"learning_rate": 5.386636251336229e-07,
"loss": 1.3146,
"step": 623
},
{
"epoch": 3.85,
"learning_rate": 4.771879562713576e-07,
"loss": 1.3709,
"step": 624
},
{
"epoch": 3.86,
"learning_rate": 4.194304228229806e-07,
"loss": 1.3441,
"step": 625
},
{
"epoch": 3.86,
"learning_rate": 3.653924610263703e-07,
"loss": 1.3294,
"step": 626
},
{
"epoch": 3.87,
"learning_rate": 3.1507541462604255e-07,
"loss": 1.3424,
"step": 627
},
{
"epoch": 3.87,
"learning_rate": 2.6848053483972677e-07,
"loss": 1.3857,
"step": 628
},
{
"epoch": 3.88,
"learning_rate": 2.2560898032724096e-07,
"loss": 1.3648,
"step": 629
},
{
"epoch": 3.89,
"learning_rate": 1.8646181716164831e-07,
"loss": 1.3508,
"step": 630
},
{
"epoch": 3.89,
"learning_rate": 1.5104001880281157e-07,
"loss": 1.3322,
"step": 631
},
{
"epoch": 3.9,
"learning_rate": 1.1934446607311243e-07,
"loss": 1.366,
"step": 632
},
{
"epoch": 3.9,
"learning_rate": 9.137594713563568e-08,
"loss": 1.371,
"step": 633
},
{
"epoch": 3.91,
"learning_rate": 6.71351574745016e-08,
"loss": 1.3304,
"step": 634
},
{
"epoch": 3.92,
"learning_rate": 4.662269987756317e-08,
"loss": 1.3275,
"step": 635
},
{
"epoch": 3.92,
"learning_rate": 2.9839084421467984e-08,
"loss": 1.3887,
"step": 636
},
{
"epoch": 3.93,
"learning_rate": 1.6784728458985042e-08,
"loss": 1.3698,
"step": 637
},
{
"epoch": 3.94,
"learning_rate": 7.45995660854648e-09,
"loss": 1.3629,
"step": 638
},
{
"epoch": 3.94,
"learning_rate": 1.865000746220646e-09,
"loss": 1.3392,
"step": 639
},
{
"epoch": 3.95,
"learning_rate": 0.0,
"loss": 1.3235,
"step": 640
},
{
"epoch": 3.95,
"eval_loss": 1.9365400075912476,
"eval_runtime": 290.1841,
"eval_samples_per_second": 205.769,
"eval_steps_per_second": 205.769,
"step": 640
}
],
"logging_steps": 1,
"max_steps": 640,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 160,
"total_flos": 1.8738986980107878e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
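
The log above appears to be a standard Hugging Face transformers TrainerState dump (keys such as best_metric, log_history, eval_steps, and global_step match that layout). Below is a minimal sketch, not part of the checkpoint itself, showing one way to read this file and separate the per-step training losses from the periodic eval entries; the file path is an assumption and should be adjusted to wherever the checkpoint directory actually lives.

import json

# Assumed path; point this at the checkpoint folder containing trainer_state.json.
with open("llmTechChat-lora/checkpoint-640/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; eval entries carry "eval_loss" (every eval_steps=40 steps here).
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
print(f"final train loss {train_log[-1]['loss']:.4f} at step {train_log[-1]['step']}")
print(f"final eval loss {eval_log[-1]['eval_loss']:.4f} at step {eval_log[-1]['step']}")

Run against this file, the best recorded eval_loss (1.8479 at checkpoint-320, around epoch 2) precedes the later eval points, whose loss rises back toward 1.94 by step 640, which is why best_model_checkpoint does not point at the final checkpoint.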