{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.916083916083916,
"eval_steps": 9,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.027972027972027972,
"grad_norm": 5.75,
"learning_rate": 2e-05,
"loss": 1.8618,
"step": 1
},
{
"epoch": 0.027972027972027972,
"eval_loss": 1.856866478919983,
"eval_runtime": 4.5304,
"eval_samples_per_second": 3.532,
"eval_steps_per_second": 1.766,
"step": 1
},
{
"epoch": 0.055944055944055944,
"grad_norm": 5.5,
"learning_rate": 4e-05,
"loss": 1.8343,
"step": 2
},
{
"epoch": 0.08391608391608392,
"grad_norm": 5.75,
"learning_rate": 6e-05,
"loss": 1.87,
"step": 3
},
{
"epoch": 0.11188811188811189,
"grad_norm": 6.0625,
"learning_rate": 8e-05,
"loss": 1.6976,
"step": 4
},
{
"epoch": 0.13986013986013987,
"grad_norm": 5.6875,
"learning_rate": 0.0001,
"loss": 1.0745,
"step": 5
},
{
"epoch": 0.16783216783216784,
"grad_norm": 2.875,
"learning_rate": 0.00012,
"loss": 0.5387,
"step": 6
},
{
"epoch": 0.1958041958041958,
"grad_norm": 2.90625,
"learning_rate": 0.00014,
"loss": 0.3055,
"step": 7
},
{
"epoch": 0.22377622377622378,
"grad_norm": 0.51171875,
"learning_rate": 0.00016,
"loss": 0.0592,
"step": 8
},
{
"epoch": 0.2517482517482518,
"grad_norm": 0.65625,
"learning_rate": 0.00018,
"loss": 0.0185,
"step": 9
},
{
"epoch": 0.2517482517482518,
"eval_loss": 0.0595741793513298,
"eval_runtime": 4.6321,
"eval_samples_per_second": 3.454,
"eval_steps_per_second": 1.727,
"step": 9
},
{
"epoch": 0.27972027972027974,
"grad_norm": 1.65625,
"learning_rate": 0.0002,
"loss": 0.1101,
"step": 10
},
{
"epoch": 0.3076923076923077,
"grad_norm": 6.375,
"learning_rate": 0.00019997080140801932,
"loss": 0.1327,
"step": 11
},
{
"epoch": 0.3356643356643357,
"grad_norm": 0.21484375,
"learning_rate": 0.00019988322268323268,
"loss": 0.0082,
"step": 12
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.1357421875,
"learning_rate": 0.00019973731496914914,
"loss": 0.0049,
"step": 13
},
{
"epoch": 0.3916083916083916,
"grad_norm": 0.0693359375,
"learning_rate": 0.00019953316347176488,
"loss": 0.0024,
"step": 14
},
{
"epoch": 0.4195804195804196,
"grad_norm": 0.49609375,
"learning_rate": 0.0001992708874098054,
"loss": 0.065,
"step": 15
},
{
"epoch": 0.44755244755244755,
"grad_norm": 0.07568359375,
"learning_rate": 0.0001989506399451051,
"loss": 0.0029,
"step": 16
},
{
"epoch": 0.4755244755244755,
"grad_norm": 0.134765625,
"learning_rate": 0.0001985726080931651,
"loss": 0.0053,
"step": 17
},
{
"epoch": 0.5034965034965035,
"grad_norm": 0.1416015625,
"learning_rate": 0.00019813701261394136,
"loss": 0.0056,
"step": 18
},
{
"epoch": 0.5034965034965035,
"eval_loss": 0.02016551047563553,
"eval_runtime": 4.6628,
"eval_samples_per_second": 3.431,
"eval_steps_per_second": 1.716,
"step": 18
},
{
"epoch": 0.5314685314685315,
"grad_norm": 0.0791015625,
"learning_rate": 0.00019764410788292722,
"loss": 0.003,
"step": 19
},
{
"epoch": 0.5594405594405595,
"grad_norm": 0.032470703125,
"learning_rate": 0.0001970941817426052,
"loss": 0.0011,
"step": 20
},
{
"epoch": 0.5874125874125874,
"grad_norm": 0.447265625,
"learning_rate": 0.00019648755533435518,
"loss": 0.0445,
"step": 21
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.87890625,
"learning_rate": 0.00019582458291091663,
"loss": 0.0708,
"step": 22
},
{
"epoch": 0.6433566433566433,
"grad_norm": 0.259765625,
"learning_rate": 0.00019510565162951537,
"loss": 0.0114,
"step": 23
},
{
"epoch": 0.6713286713286714,
"grad_norm": 0.609375,
"learning_rate": 0.0001943311813257743,
"loss": 0.0243,
"step": 24
},
{
"epoch": 0.6993006993006993,
"grad_norm": 1.921875,
"learning_rate": 0.0001935016242685415,
"loss": 0.0655,
"step": 25
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.515625,
"learning_rate": 0.00019261746489577765,
"loss": 0.0756,
"step": 26
},
{
"epoch": 0.7552447552447552,
"grad_norm": 0.0201416015625,
"learning_rate": 0.00019167921953165825,
"loss": 0.0008,
"step": 27
},
{
"epoch": 0.7552447552447552,
"eval_loss": 0.0004657781682908535,
"eval_runtime": 4.6398,
"eval_samples_per_second": 3.448,
"eval_steps_per_second": 1.724,
"step": 27
},
{
"epoch": 0.7832167832167832,
"grad_norm": 0.0186767578125,
"learning_rate": 0.00019068743608505455,
"loss": 0.0008,
"step": 28
},
{
"epoch": 0.8111888111888111,
"grad_norm": 0.02392578125,
"learning_rate": 0.00018964269372957038,
"loss": 0.0008,
"step": 29
},
{
"epoch": 0.8391608391608392,
"grad_norm": 0.134765625,
"learning_rate": 0.000188545602565321,
"loss": 0.0046,
"step": 30
},
{
"epoch": 0.8671328671328671,
"grad_norm": 0.1416015625,
"learning_rate": 0.0001873968032626518,
"loss": 0.0049,
"step": 31
},
{
"epoch": 0.8951048951048951,
"grad_norm": 0.1220703125,
"learning_rate": 0.00018619696668800492,
"loss": 0.0047,
"step": 32
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.0240478515625,
"learning_rate": 0.0001849467935121521,
"loss": 0.0012,
"step": 33
},
{
"epoch": 0.951048951048951,
"grad_norm": 0.032958984375,
"learning_rate": 0.00018364701380102266,
"loss": 0.0014,
"step": 34
},
{
"epoch": 0.9790209790209791,
"grad_norm": 0.0247802734375,
"learning_rate": 0.00018229838658936564,
"loss": 0.001,
"step": 35
},
{
"epoch": 1.006993006993007,
"grad_norm": 0.01458740234375,
"learning_rate": 0.00018090169943749476,
"loss": 0.0006,
"step": 36
},
{
"epoch": 1.006993006993007,
"eval_loss": 0.00015493688988499343,
"eval_runtime": 4.6565,
"eval_samples_per_second": 3.436,
"eval_steps_per_second": 1.718,
"step": 36
},
{
"epoch": 1.034965034965035,
"grad_norm": 0.00191497802734375,
"learning_rate": 0.00017945776797137543,
"loss": 0.0001,
"step": 37
},
{
"epoch": 1.062937062937063,
"grad_norm": 0.00726318359375,
"learning_rate": 0.00017796743540632223,
"loss": 0.0003,
"step": 38
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.002288818359375,
"learning_rate": 0.00017643157205458483,
"loss": 0.0001,
"step": 39
},
{
"epoch": 1.118881118881119,
"grad_norm": 0.0029144287109375,
"learning_rate": 0.00017485107481711012,
"loss": 0.0001,
"step": 40
},
{
"epoch": 1.1468531468531469,
"grad_norm": 0.0009918212890625,
"learning_rate": 0.00017322686665977737,
"loss": 0.0001,
"step": 41
},
{
"epoch": 1.1748251748251748,
"grad_norm": 0.000743865966796875,
"learning_rate": 0.00017155989607441213,
"loss": 0.0,
"step": 42
},
{
"epoch": 1.2027972027972027,
"grad_norm": 0.001007080078125,
"learning_rate": 0.00016985113652489374,
"loss": 0.0001,
"step": 43
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.002471923828125,
"learning_rate": 0.00016810158587867973,
"loss": 0.0001,
"step": 44
},
{
"epoch": 1.2587412587412588,
"grad_norm": 0.00103759765625,
"learning_rate": 0.00016631226582407952,
"loss": 0.0001,
"step": 45
},
{
"epoch": 1.2587412587412588,
"eval_loss": 2.9456097763613798e-05,
"eval_runtime": 4.6586,
"eval_samples_per_second": 3.434,
"eval_steps_per_second": 1.717,
"step": 45
},
{
"epoch": 1.2867132867132867,
"grad_norm": 0.000637054443359375,
"learning_rate": 0.00016448422127361706,
"loss": 0.0,
"step": 46
},
{
"epoch": 1.3146853146853146,
"grad_norm": 0.000431060791015625,
"learning_rate": 0.00016261851975383137,
"loss": 0.0,
"step": 47
},
{
"epoch": 1.3426573426573427,
"grad_norm": 0.0020904541015625,
"learning_rate": 0.00016071625078187114,
"loss": 0.0001,
"step": 48
},
{
"epoch": 1.3706293706293706,
"grad_norm": 0.000568389892578125,
"learning_rate": 0.00015877852522924732,
"loss": 0.0,
"step": 49
},
{
"epoch": 1.3986013986013985,
"grad_norm": 0.00135040283203125,
"learning_rate": 0.00015680647467311557,
"loss": 0.0,
"step": 50
},
{
"epoch": 1.4265734265734267,
"grad_norm": 0.000621795654296875,
"learning_rate": 0.00015480125073546704,
"loss": 0.0,
"step": 51
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.7578125,
"learning_rate": 0.0001527640244106133,
"loss": 0.0953,
"step": 52
},
{
"epoch": 1.4825174825174825,
"grad_norm": 0.005279541015625,
"learning_rate": 0.00015069598538135906,
"loss": 0.0002,
"step": 53
},
{
"epoch": 1.5104895104895104,
"grad_norm": 0.0101318359375,
"learning_rate": 0.0001485983413242606,
"loss": 0.0004,
"step": 54
},
{
"epoch": 1.5104895104895104,
"eval_loss": 0.0003907401696778834,
"eval_runtime": 4.6412,
"eval_samples_per_second": 3.447,
"eval_steps_per_second": 1.724,
"step": 54
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.007568359375,
"learning_rate": 0.00014647231720437686,
"loss": 0.0003,
"step": 55
},
{
"epoch": 1.5664335664335665,
"grad_norm": 0.0167236328125,
"learning_rate": 0.00014431915455992414,
"loss": 0.0007,
"step": 56
},
{
"epoch": 1.5944055944055944,
"grad_norm": 0.0703125,
"learning_rate": 0.00014214011077725292,
"loss": 0.0033,
"step": 57
},
{
"epoch": 1.6223776223776225,
"grad_norm": 0.046142578125,
"learning_rate": 0.00013993645835656953,
"loss": 0.0022,
"step": 58
},
{
"epoch": 1.6503496503496504,
"grad_norm": 0.32421875,
"learning_rate": 0.00013770948416883205,
"loss": 0.016,
"step": 59
},
{
"epoch": 1.6783216783216783,
"grad_norm": 0.12060546875,
"learning_rate": 0.00013546048870425356,
"loss": 0.0058,
"step": 60
},
{
"epoch": 1.7062937062937062,
"grad_norm": 0.0341796875,
"learning_rate": 0.00013319078531285285,
"loss": 0.0019,
"step": 61
},
{
"epoch": 1.7342657342657342,
"grad_norm": 0.0166015625,
"learning_rate": 0.00013090169943749476,
"loss": 0.001,
"step": 62
},
{
"epoch": 1.762237762237762,
"grad_norm": 0.01190185546875,
"learning_rate": 0.00012859456783986893,
"loss": 0.0007,
"step": 63
},
{
"epoch": 1.762237762237762,
"eval_loss": 0.00017181402654387057,
"eval_runtime": 4.6193,
"eval_samples_per_second": 3.464,
"eval_steps_per_second": 1.732,
"step": 63
},
{
"epoch": 1.7902097902097902,
"grad_norm": 0.005401611328125,
"learning_rate": 0.0001262707378198587,
"loss": 0.0003,
"step": 64
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.00157928466796875,
"learning_rate": 0.0001239315664287558,
"loss": 0.0001,
"step": 65
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.0022125244140625,
"learning_rate": 0.00012157841967678063,
"loss": 0.0002,
"step": 66
},
{
"epoch": 1.8741258741258742,
"grad_norm": 0.00225830078125,
"learning_rate": 0.00011921267173537086,
"loss": 0.0001,
"step": 67
},
{
"epoch": 1.902097902097902,
"grad_norm": 0.00179290771484375,
"learning_rate": 0.00011683570413470383,
"loss": 0.0001,
"step": 68
},
{
"epoch": 1.93006993006993,
"grad_norm": 0.00188446044921875,
"learning_rate": 0.00011444890495692213,
"loss": 0.0001,
"step": 69
},
{
"epoch": 1.958041958041958,
"grad_norm": 0.0012054443359375,
"learning_rate": 0.0001120536680255323,
"loss": 0.0001,
"step": 70
},
{
"epoch": 1.986013986013986,
"grad_norm": 0.0030364990234375,
"learning_rate": 0.00010965139209145152,
"loss": 0.0002,
"step": 71
},
{
"epoch": 2.013986013986014,
"grad_norm": 0.0012359619140625,
"learning_rate": 0.00010724348001617625,
"loss": 0.0001,
"step": 72
},
{
"epoch": 2.013986013986014,
"eval_loss": 9.037779818754643e-05,
"eval_runtime": 4.5862,
"eval_samples_per_second": 3.489,
"eval_steps_per_second": 1.744,
"step": 72
},
{
"epoch": 2.041958041958042,
"grad_norm": 0.0011444091796875,
"learning_rate": 0.00010483133795255071,
"loss": 0.0001,
"step": 73
},
{
"epoch": 2.06993006993007,
"grad_norm": 0.00121307373046875,
"learning_rate": 0.00010241637452361323,
"loss": 0.0001,
"step": 74
},
{
"epoch": 2.097902097902098,
"grad_norm": 0.002105712890625,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 75
},
{
"epoch": 2.125874125874126,
"grad_norm": 0.002227783203125,
"learning_rate": 9.75836254763868e-05,
"loss": 0.0001,
"step": 76
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.318359375,
"learning_rate": 9.516866204744931e-05,
"loss": 0.0985,
"step": 77
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.0015869140625,
"learning_rate": 9.275651998382377e-05,
"loss": 0.0001,
"step": 78
},
{
"epoch": 2.20979020979021,
"grad_norm": 0.00093841552734375,
"learning_rate": 9.034860790854849e-05,
"loss": 0.0001,
"step": 79
},
{
"epoch": 2.237762237762238,
"grad_norm": 0.00142669677734375,
"learning_rate": 8.79463319744677e-05,
"loss": 0.0001,
"step": 80
},
{
"epoch": 2.265734265734266,
"grad_norm": 0.00189208984375,
"learning_rate": 8.55510950430779e-05,
"loss": 0.0001,
"step": 81
},
{
"epoch": 2.265734265734266,
"eval_loss": 0.00017878212383948267,
"eval_runtime": 4.6091,
"eval_samples_per_second": 3.471,
"eval_steps_per_second": 1.736,
"step": 81
},
{
"epoch": 2.2937062937062938,
"grad_norm": 0.003875732421875,
"learning_rate": 8.316429586529615e-05,
"loss": 0.0002,
"step": 82
},
{
"epoch": 2.3216783216783217,
"grad_norm": 0.00482177734375,
"learning_rate": 8.078732826462915e-05,
"loss": 0.0003,
"step": 83
},
{
"epoch": 2.3496503496503496,
"grad_norm": 0.00628662109375,
"learning_rate": 7.84215803232194e-05,
"loss": 0.0004,
"step": 84
},
{
"epoch": 2.3776223776223775,
"grad_norm": 0.006072998046875,
"learning_rate": 7.606843357124426e-05,
"loss": 0.0003,
"step": 85
},
{
"epoch": 2.4055944055944054,
"grad_norm": 0.01068115234375,
"learning_rate": 7.372926218014131e-05,
"loss": 0.0005,
"step": 86
},
{
"epoch": 2.4335664335664333,
"grad_norm": 0.008056640625,
"learning_rate": 7.14054321601311e-05,
"loss": 0.0005,
"step": 87
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.006927490234375,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0004,
"step": 88
},
{
"epoch": 2.4895104895104896,
"grad_norm": 0.01263427734375,
"learning_rate": 6.680921468714719e-05,
"loss": 0.0007,
"step": 89
},
{
"epoch": 2.5174825174825175,
"grad_norm": 0.01202392578125,
"learning_rate": 6.453951129574644e-05,
"loss": 0.0006,
"step": 90
},
{
"epoch": 2.5174825174825175,
"eval_loss": 0.00044604684808291495,
"eval_runtime": 4.5475,
"eval_samples_per_second": 3.518,
"eval_steps_per_second": 1.759,
"step": 90
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.01318359375,
"learning_rate": 6.229051583116796e-05,
"loss": 0.0007,
"step": 91
},
{
"epoch": 2.5734265734265733,
"grad_norm": 0.01214599609375,
"learning_rate": 6.006354164343046e-05,
"loss": 0.0007,
"step": 92
},
{
"epoch": 2.6013986013986012,
"grad_norm": 0.00799560546875,
"learning_rate": 5.785988922274711e-05,
"loss": 0.0006,
"step": 93
},
{
"epoch": 2.629370629370629,
"grad_norm": 0.0123291015625,
"learning_rate": 5.568084544007588e-05,
"loss": 0.0007,
"step": 94
},
{
"epoch": 2.6573426573426575,
"grad_norm": 0.011962890625,
"learning_rate": 5.3527682795623146e-05,
"loss": 0.0007,
"step": 95
},
{
"epoch": 2.6853146853146854,
"grad_norm": 0.007293701171875,
"learning_rate": 5.14016586757394e-05,
"loss": 0.0004,
"step": 96
},
{
"epoch": 2.7132867132867133,
"grad_norm": 0.01287841796875,
"learning_rate": 4.9304014618640995e-05,
"loss": 0.0007,
"step": 97
},
{
"epoch": 2.7412587412587412,
"grad_norm": 0.08154296875,
"learning_rate": 4.723597558938672e-05,
"loss": 0.0011,
"step": 98
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.0096435546875,
"learning_rate": 4.519874926453302e-05,
"loss": 0.0006,
"step": 99
},
{
"epoch": 2.769230769230769,
"eval_loss": 0.00037250021705403924,
"eval_runtime": 4.6476,
"eval_samples_per_second": 3.443,
"eval_steps_per_second": 1.721,
"step": 99
},
{
"epoch": 2.797202797202797,
"grad_norm": 0.00970458984375,
"learning_rate": 4.3193525326884435e-05,
"loss": 0.0005,
"step": 100
},
{
"epoch": 2.825174825174825,
"grad_norm": 0.007781982421875,
"learning_rate": 4.12214747707527e-05,
"loss": 0.0004,
"step": 101
},
{
"epoch": 2.8531468531468533,
"grad_norm": 0.0108642578125,
"learning_rate": 3.9283749218128885e-05,
"loss": 0.0006,
"step": 102
},
{
"epoch": 2.8811188811188813,
"grad_norm": 0.00811767578125,
"learning_rate": 3.738148024616863e-05,
"loss": 0.0005,
"step": 103
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.007080078125,
"learning_rate": 3.5515778726382966e-05,
"loss": 0.0004,
"step": 104
},
{
"epoch": 2.937062937062937,
"grad_norm": 0.0140380859375,
"learning_rate": 3.36877341759205e-05,
"loss": 0.0008,
"step": 105
},
{
"epoch": 2.965034965034965,
"grad_norm": 0.01104736328125,
"learning_rate": 3.1898414121320276e-05,
"loss": 0.0007,
"step": 106
},
{
"epoch": 2.993006993006993,
"grad_norm": 0.00726318359375,
"learning_rate": 3.0148863475106314e-05,
"loss": 0.0004,
"step": 107
},
{
"epoch": 3.020979020979021,
"grad_norm": 0.007659912109375,
"learning_rate": 2.84401039255879e-05,
"loss": 0.0005,
"step": 108
},
{
"epoch": 3.020979020979021,
"eval_loss": 0.00027107546338811517,
"eval_runtime": 4.6476,
"eval_samples_per_second": 3.443,
"eval_steps_per_second": 1.721,
"step": 108
},
{
"epoch": 3.0489510489510487,
"grad_norm": 0.006683349609375,
"learning_rate": 2.677313334022268e-05,
"loss": 0.0004,
"step": 109
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.0067138671875,
"learning_rate": 2.514892518288988e-05,
"loss": 0.0004,
"step": 110
},
{
"epoch": 3.104895104895105,
"grad_norm": 0.004974365234375,
"learning_rate": 2.356842794541516e-05,
"loss": 0.0003,
"step": 111
},
{
"epoch": 3.132867132867133,
"grad_norm": 0.004119873046875,
"learning_rate": 2.2032564593677774e-05,
"loss": 0.0003,
"step": 112
},
{
"epoch": 3.160839160839161,
"grad_norm": 0.005035400390625,
"learning_rate": 2.0542232028624586e-05,
"loss": 0.0003,
"step": 113
},
{
"epoch": 3.1888111888111887,
"grad_norm": 0.00799560546875,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.0005,
"step": 114
},
{
"epoch": 3.2167832167832167,
"grad_norm": 0.00634765625,
"learning_rate": 1.7701613410634365e-05,
"loss": 0.0003,
"step": 115
},
{
"epoch": 3.2447552447552446,
"grad_norm": 0.00811767578125,
"learning_rate": 1.6352986198977325e-05,
"loss": 0.0004,
"step": 116
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.006256103515625,
"learning_rate": 1.5053206487847914e-05,
"loss": 0.0003,
"step": 117
},
{
"epoch": 3.2727272727272725,
"eval_loss": 0.00024080852745100856,
"eval_runtime": 4.6559,
"eval_samples_per_second": 3.436,
"eval_steps_per_second": 1.718,
"step": 117
},
{
"epoch": 3.300699300699301,
"grad_norm": 0.00762939453125,
"learning_rate": 1.3803033311995072e-05,
"loss": 0.0004,
"step": 118
},
{
"epoch": 3.3286713286713288,
"grad_norm": 0.00933837890625,
"learning_rate": 1.260319673734821e-05,
"loss": 0.0007,
"step": 119
},
{
"epoch": 3.3566433566433567,
"grad_norm": 0.003875732421875,
"learning_rate": 1.1454397434679021e-05,
"loss": 0.0003,
"step": 120
},
{
"epoch": 3.3846153846153846,
"grad_norm": 0.0057373046875,
"learning_rate": 1.0357306270429624e-05,
"loss": 0.0003,
"step": 121
},
{
"epoch": 3.4125874125874125,
"grad_norm": 0.004913330078125,
"learning_rate": 9.31256391494546e-06,
"loss": 0.0003,
"step": 122
},
{
"epoch": 3.4405594405594404,
"grad_norm": 0.00592041015625,
"learning_rate": 8.32078046834176e-06,
"loss": 0.0003,
"step": 123
},
{
"epoch": 3.4685314685314683,
"grad_norm": 0.0035247802734375,
"learning_rate": 7.382535104222366e-06,
"loss": 0.0002,
"step": 124
},
{
"epoch": 3.4965034965034967,
"grad_norm": 0.00482177734375,
"learning_rate": 6.498375731458528e-06,
"loss": 0.0002,
"step": 125
},
{
"epoch": 3.5244755244755246,
"grad_norm": 0.006683349609375,
"learning_rate": 5.668818674225685e-06,
"loss": 0.0004,
"step": 126
},
{
"epoch": 3.5244755244755246,
"eval_loss": 0.00024340736854355782,
"eval_runtime": 4.6706,
"eval_samples_per_second": 3.426,
"eval_steps_per_second": 1.713,
"step": 126
},
{
"epoch": 3.5524475524475525,
"grad_norm": 0.007049560546875,
"learning_rate": 4.8943483704846475e-06,
"loss": 0.0004,
"step": 127
},
{
"epoch": 3.5804195804195804,
"grad_norm": 0.005950927734375,
"learning_rate": 4.175417089083378e-06,
"loss": 0.0003,
"step": 128
},
{
"epoch": 3.6083916083916083,
"grad_norm": 0.004486083984375,
"learning_rate": 3.512444665644865e-06,
"loss": 0.0003,
"step": 129
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.02294921875,
"learning_rate": 2.905818257394799e-06,
"loss": 0.0006,
"step": 130
},
{
"epoch": 3.664335664335664,
"grad_norm": 0.006805419921875,
"learning_rate": 2.3558921170727888e-06,
"loss": 0.0004,
"step": 131
},
{
"epoch": 3.6923076923076925,
"grad_norm": 0.006622314453125,
"learning_rate": 1.8629873860586566e-06,
"loss": 0.0004,
"step": 132
},
{
"epoch": 3.7202797202797204,
"grad_norm": 0.220703125,
"learning_rate": 1.4273919068349184e-06,
"loss": 0.0492,
"step": 133
},
{
"epoch": 3.7482517482517483,
"grad_norm": 0.0050048828125,
"learning_rate": 1.0493600548948878e-06,
"loss": 0.0003,
"step": 134
},
{
"epoch": 3.7762237762237763,
"grad_norm": 0.00958251953125,
"learning_rate": 7.291125901946027e-07,
"loss": 0.0006,
"step": 135
},
{
"epoch": 3.7762237762237763,
"eval_loss": 0.0002461140393279493,
"eval_runtime": 4.6249,
"eval_samples_per_second": 3.46,
"eval_steps_per_second": 1.73,
"step": 135
},
{
"epoch": 3.804195804195804,
"grad_norm": 0.004302978515625,
"learning_rate": 4.668365282351372e-07,
"loss": 0.0002,
"step": 136
},
{
"epoch": 3.832167832167832,
"grad_norm": 0.0074462890625,
"learning_rate": 2.6268503085089547e-07,
"loss": 0.0004,
"step": 137
},
{
"epoch": 3.86013986013986,
"grad_norm": 0.0048828125,
"learning_rate": 1.1677731676733584e-07,
"loss": 0.0003,
"step": 138
},
{
"epoch": 3.8881118881118883,
"grad_norm": 0.0048828125,
"learning_rate": 2.9198591980705848e-08,
"loss": 0.0003,
"step": 139
},
{
"epoch": 3.916083916083916,
"grad_norm": 0.0057373046875,
"learning_rate": 0.0,
"loss": 0.0003,
"step": 140
}
],
"logging_steps": 1,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 35,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.083234187669668e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}