lora-8b-physic / checkpoint-336 / trainer_state.json
kloodia's picture
Upload folder using huggingface_hub
c4d7b6b verified
raw
history blame contribute delete
No virus
55.9 kB
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.979228486646884,
"eval_steps": 42,
"global_step": 336,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.0981353223323822,
"learning_rate": 2e-05,
"loss": 0.641,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 0.6416735053062439,
"eval_runtime": 21.4326,
"eval_samples_per_second": 46.331,
"eval_steps_per_second": 11.618,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.09748291969299316,
"learning_rate": 4e-05,
"loss": 0.6396,
"step": 2
},
{
"epoch": 0.02,
"grad_norm": 0.09947647899389267,
"learning_rate": 6e-05,
"loss": 0.6397,
"step": 3
},
{
"epoch": 0.02,
"grad_norm": 0.09976381808519363,
"learning_rate": 8e-05,
"loss": 0.6371,
"step": 4
},
{
"epoch": 0.03,
"grad_norm": 0.10493721067905426,
"learning_rate": 0.0001,
"loss": 0.6491,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 0.1144007071852684,
"learning_rate": 0.00012,
"loss": 0.6218,
"step": 6
},
{
"epoch": 0.04,
"grad_norm": 0.08536222577095032,
"learning_rate": 0.00014,
"loss": 0.6177,
"step": 7
},
{
"epoch": 0.05,
"grad_norm": 0.11926598846912384,
"learning_rate": 0.00016,
"loss": 0.5861,
"step": 8
},
{
"epoch": 0.05,
"grad_norm": 0.15648387372493744,
"learning_rate": 0.00018,
"loss": 0.6006,
"step": 9
},
{
"epoch": 0.06,
"grad_norm": 0.12172720581293106,
"learning_rate": 0.0002,
"loss": 0.5845,
"step": 10
},
{
"epoch": 0.07,
"grad_norm": 0.09348208457231522,
"learning_rate": 0.0001999988739622358,
"loss": 0.5471,
"step": 11
},
{
"epoch": 0.07,
"grad_norm": 0.07471276819705963,
"learning_rate": 0.00019999549587430254,
"loss": 0.578,
"step": 12
},
{
"epoch": 0.08,
"grad_norm": 0.07200929522514343,
"learning_rate": 0.00019998986581227718,
"loss": 0.5328,
"step": 13
},
{
"epoch": 0.08,
"grad_norm": 0.07460763305425644,
"learning_rate": 0.000199981983902953,
"loss": 0.5651,
"step": 14
},
{
"epoch": 0.09,
"grad_norm": 0.07441641390323639,
"learning_rate": 0.00019997185032383664,
"loss": 0.5589,
"step": 15
},
{
"epoch": 0.09,
"grad_norm": 0.07513019442558289,
"learning_rate": 0.00019995946530314385,
"loss": 0.5736,
"step": 16
},
{
"epoch": 0.1,
"grad_norm": 0.06902395933866501,
"learning_rate": 0.00019994482911979468,
"loss": 0.556,
"step": 17
},
{
"epoch": 0.11,
"grad_norm": 0.07314619421958923,
"learning_rate": 0.00019992794210340706,
"loss": 0.5469,
"step": 18
},
{
"epoch": 0.11,
"grad_norm": 0.06833848357200623,
"learning_rate": 0.00019990880463428937,
"loss": 0.5448,
"step": 19
},
{
"epoch": 0.12,
"grad_norm": 0.07301248610019684,
"learning_rate": 0.00019988741714343177,
"loss": 0.5612,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 0.07063400000333786,
"learning_rate": 0.0001998637801124968,
"loss": 0.5446,
"step": 21
},
{
"epoch": 0.13,
"grad_norm": 0.06935883313417435,
"learning_rate": 0.00019983789407380828,
"loss": 0.5223,
"step": 22
},
{
"epoch": 0.14,
"grad_norm": 0.06576420366764069,
"learning_rate": 0.00019980975961033924,
"loss": 0.5351,
"step": 23
},
{
"epoch": 0.14,
"grad_norm": 0.07276671379804611,
"learning_rate": 0.00019977937735569915,
"loss": 0.5423,
"step": 24
},
{
"epoch": 0.15,
"grad_norm": 0.0756976306438446,
"learning_rate": 0.00019974674799411925,
"loss": 0.5344,
"step": 25
},
{
"epoch": 0.15,
"grad_norm": 0.06945928931236267,
"learning_rate": 0.00019971187226043745,
"loss": 0.5198,
"step": 26
},
{
"epoch": 0.16,
"grad_norm": 0.06587155908346176,
"learning_rate": 0.0001996747509400816,
"loss": 0.5175,
"step": 27
},
{
"epoch": 0.17,
"grad_norm": 0.0752682164311409,
"learning_rate": 0.0001996353848690519,
"loss": 0.5068,
"step": 28
},
{
"epoch": 0.17,
"grad_norm": 0.0740601122379303,
"learning_rate": 0.00019959377493390196,
"loss": 0.535,
"step": 29
},
{
"epoch": 0.18,
"grad_norm": 0.07076304405927658,
"learning_rate": 0.00019954992207171898,
"loss": 0.5079,
"step": 30
},
{
"epoch": 0.18,
"grad_norm": 0.0776033028960228,
"learning_rate": 0.00019950382727010254,
"loss": 0.5124,
"step": 31
},
{
"epoch": 0.19,
"grad_norm": 0.0779872015118599,
"learning_rate": 0.00019945549156714234,
"loss": 0.5146,
"step": 32
},
{
"epoch": 0.2,
"grad_norm": 0.08037945628166199,
"learning_rate": 0.00019940491605139498,
"loss": 0.5189,
"step": 33
},
{
"epoch": 0.2,
"grad_norm": 0.06880298256874084,
"learning_rate": 0.0001993521018618592,
"loss": 0.506,
"step": 34
},
{
"epoch": 0.21,
"grad_norm": 0.0755767747759819,
"learning_rate": 0.00019929705018795053,
"loss": 0.4997,
"step": 35
},
{
"epoch": 0.21,
"grad_norm": 0.07505559921264648,
"learning_rate": 0.00019923976226947417,
"loss": 0.502,
"step": 36
},
{
"epoch": 0.22,
"grad_norm": 0.07533205300569534,
"learning_rate": 0.00019918023939659733,
"loss": 0.5093,
"step": 37
},
{
"epoch": 0.23,
"grad_norm": 0.0748637244105339,
"learning_rate": 0.0001991184829098201,
"loss": 0.4976,
"step": 38
},
{
"epoch": 0.23,
"grad_norm": 0.076931431889534,
"learning_rate": 0.00019905449419994518,
"loss": 0.4992,
"step": 39
},
{
"epoch": 0.24,
"grad_norm": 0.07511387020349503,
"learning_rate": 0.0001989882747080466,
"loss": 0.5069,
"step": 40
},
{
"epoch": 0.24,
"grad_norm": 0.0723625123500824,
"learning_rate": 0.00019891982592543746,
"loss": 0.4952,
"step": 41
},
{
"epoch": 0.25,
"grad_norm": 0.07320375740528107,
"learning_rate": 0.00019884914939363588,
"loss": 0.5093,
"step": 42
},
{
"epoch": 0.25,
"eval_loss": 0.5259941220283508,
"eval_runtime": 21.4684,
"eval_samples_per_second": 46.254,
"eval_steps_per_second": 11.598,
"step": 42
},
{
"epoch": 0.26,
"grad_norm": 0.07251272350549698,
"learning_rate": 0.00019877624670433086,
"loss": 0.4931,
"step": 43
},
{
"epoch": 0.26,
"grad_norm": 0.07731667906045914,
"learning_rate": 0.00019870111949934599,
"loss": 0.4879,
"step": 44
},
{
"epoch": 0.27,
"grad_norm": 0.074358269572258,
"learning_rate": 0.00019862376947060264,
"loss": 0.5049,
"step": 45
},
{
"epoch": 0.27,
"grad_norm": 0.0808371901512146,
"learning_rate": 0.0001985441983600819,
"loss": 0.517,
"step": 46
},
{
"epoch": 0.28,
"grad_norm": 0.07559769600629807,
"learning_rate": 0.00019846240795978528,
"loss": 0.4834,
"step": 47
},
{
"epoch": 0.28,
"grad_norm": 0.07425505667924881,
"learning_rate": 0.00019837840011169438,
"loss": 0.5138,
"step": 48
},
{
"epoch": 0.29,
"grad_norm": 0.07782939821481705,
"learning_rate": 0.00019829217670772935,
"loss": 0.4858,
"step": 49
},
{
"epoch": 0.3,
"grad_norm": 0.0754002034664154,
"learning_rate": 0.00019820373968970642,
"loss": 0.4941,
"step": 50
},
{
"epoch": 0.3,
"grad_norm": 0.07364428788423538,
"learning_rate": 0.000198113091049294,
"loss": 0.4835,
"step": 51
},
{
"epoch": 0.31,
"grad_norm": 0.08309967815876007,
"learning_rate": 0.00019802023282796796,
"loss": 0.5237,
"step": 52
},
{
"epoch": 0.31,
"grad_norm": 0.07548778504133224,
"learning_rate": 0.00019792516711696556,
"loss": 0.4923,
"step": 53
},
{
"epoch": 0.32,
"grad_norm": 0.07607278972864151,
"learning_rate": 0.0001978278960572384,
"loss": 0.4971,
"step": 54
},
{
"epoch": 0.33,
"grad_norm": 0.07432844489812851,
"learning_rate": 0.00019772842183940422,
"loss": 0.4874,
"step": 55
},
{
"epoch": 0.33,
"grad_norm": 0.077260322868824,
"learning_rate": 0.00019762674670369755,
"loss": 0.5067,
"step": 56
},
{
"epoch": 0.34,
"grad_norm": 0.08594146370887756,
"learning_rate": 0.00019752287293991927,
"loss": 0.4804,
"step": 57
},
{
"epoch": 0.34,
"grad_norm": 0.075816310942173,
"learning_rate": 0.00019741680288738492,
"loss": 0.4738,
"step": 58
},
{
"epoch": 0.35,
"grad_norm": 0.07784326374530792,
"learning_rate": 0.00019730853893487228,
"loss": 0.4768,
"step": 59
},
{
"epoch": 0.36,
"grad_norm": 0.08903329074382782,
"learning_rate": 0.00019719808352056724,
"loss": 0.4773,
"step": 60
},
{
"epoch": 0.36,
"grad_norm": 0.07911587506532669,
"learning_rate": 0.00019708543913200924,
"loss": 0.4672,
"step": 61
},
{
"epoch": 0.37,
"grad_norm": 0.07881385087966919,
"learning_rate": 0.00019697060830603494,
"loss": 0.4824,
"step": 62
},
{
"epoch": 0.37,
"grad_norm": 0.08292945474386215,
"learning_rate": 0.00019685359362872125,
"loss": 0.4814,
"step": 63
},
{
"epoch": 0.38,
"grad_norm": 0.08237861096858978,
"learning_rate": 0.00019673439773532713,
"loss": 0.486,
"step": 64
},
{
"epoch": 0.39,
"grad_norm": 0.07958442717790604,
"learning_rate": 0.0001966130233102341,
"loss": 0.4913,
"step": 65
},
{
"epoch": 0.39,
"grad_norm": 0.07969169318675995,
"learning_rate": 0.00019648947308688593,
"loss": 0.4781,
"step": 66
},
{
"epoch": 0.4,
"grad_norm": 0.08310563862323761,
"learning_rate": 0.00019636374984772692,
"loss": 0.4811,
"step": 67
},
{
"epoch": 0.4,
"grad_norm": 0.07763976603746414,
"learning_rate": 0.00019623585642413938,
"loss": 0.4809,
"step": 68
},
{
"epoch": 0.41,
"grad_norm": 0.0927213802933693,
"learning_rate": 0.00019610579569637982,
"loss": 0.5019,
"step": 69
},
{
"epoch": 0.42,
"grad_norm": 0.08405344933271408,
"learning_rate": 0.000195973570593514,
"loss": 0.5001,
"step": 70
},
{
"epoch": 0.42,
"grad_norm": 0.07862479984760284,
"learning_rate": 0.0001958391840933512,
"loss": 0.4894,
"step": 71
},
{
"epoch": 0.43,
"grad_norm": 0.07815296947956085,
"learning_rate": 0.00019570263922237687,
"loss": 0.4676,
"step": 72
},
{
"epoch": 0.43,
"grad_norm": 0.07999672740697861,
"learning_rate": 0.00019556393905568458,
"loss": 0.4857,
"step": 73
},
{
"epoch": 0.44,
"grad_norm": 0.08266247063875198,
"learning_rate": 0.0001954230867169069,
"loss": 0.4842,
"step": 74
},
{
"epoch": 0.45,
"grad_norm": 0.08117777854204178,
"learning_rate": 0.00019528008537814486,
"loss": 0.4602,
"step": 75
},
{
"epoch": 0.45,
"grad_norm": 0.08203484117984772,
"learning_rate": 0.00019513493825989664,
"loss": 0.4761,
"step": 76
},
{
"epoch": 0.46,
"grad_norm": 0.07647153735160828,
"learning_rate": 0.00019498764863098495,
"loss": 0.4839,
"step": 77
},
{
"epoch": 0.46,
"grad_norm": 0.0811714455485344,
"learning_rate": 0.00019483821980848347,
"loss": 0.4803,
"step": 78
},
{
"epoch": 0.47,
"grad_norm": 0.08266978710889816,
"learning_rate": 0.00019468665515764215,
"loss": 0.4665,
"step": 79
},
{
"epoch": 0.47,
"grad_norm": 0.07869689911603928,
"learning_rate": 0.00019453295809181143,
"loss": 0.4857,
"step": 80
},
{
"epoch": 0.48,
"grad_norm": 0.08934654295444489,
"learning_rate": 0.00019437713207236525,
"loss": 0.4825,
"step": 81
},
{
"epoch": 0.49,
"grad_norm": 0.07842836529016495,
"learning_rate": 0.00019421918060862333,
"loss": 0.4609,
"step": 82
},
{
"epoch": 0.49,
"grad_norm": 0.08244986832141876,
"learning_rate": 0.0001940591072577719,
"loss": 0.4688,
"step": 83
},
{
"epoch": 0.5,
"grad_norm": 0.07819854468107224,
"learning_rate": 0.00019389691562478374,
"loss": 0.4665,
"step": 84
},
{
"epoch": 0.5,
"eval_loss": 0.5117939114570618,
"eval_runtime": 21.4742,
"eval_samples_per_second": 46.242,
"eval_steps_per_second": 11.595,
"step": 84
},
{
"epoch": 0.5,
"grad_norm": 0.0837428942322731,
"learning_rate": 0.0001937326093623369,
"loss": 0.4952,
"step": 85
},
{
"epoch": 0.51,
"grad_norm": 0.07781701534986496,
"learning_rate": 0.00019356619217073253,
"loss": 0.467,
"step": 86
},
{
"epoch": 0.52,
"grad_norm": 0.08447270840406418,
"learning_rate": 0.00019339766779781145,
"loss": 0.4838,
"step": 87
},
{
"epoch": 0.52,
"grad_norm": 0.08231997489929199,
"learning_rate": 0.00019322704003886987,
"loss": 0.4611,
"step": 88
},
{
"epoch": 0.53,
"grad_norm": 0.08507382869720459,
"learning_rate": 0.00019305431273657374,
"loss": 0.4757,
"step": 89
},
{
"epoch": 0.53,
"grad_norm": 0.08521989732980728,
"learning_rate": 0.0001928794897808724,
"loss": 0.4854,
"step": 90
},
{
"epoch": 0.54,
"grad_norm": 0.0963786169886589,
"learning_rate": 0.00019270257510891082,
"loss": 0.4505,
"step": 91
},
{
"epoch": 0.55,
"grad_norm": 0.08671442419290543,
"learning_rate": 0.0001925235727049411,
"loss": 0.4766,
"step": 92
},
{
"epoch": 0.55,
"grad_norm": 0.09087081998586655,
"learning_rate": 0.0001923424866002325,
"loss": 0.4966,
"step": 93
},
{
"epoch": 0.56,
"grad_norm": 0.07899381965398788,
"learning_rate": 0.00019215932087298092,
"loss": 0.4638,
"step": 94
},
{
"epoch": 0.56,
"grad_norm": 0.09070860594511032,
"learning_rate": 0.00019197407964821684,
"loss": 0.4847,
"step": 95
},
{
"epoch": 0.57,
"grad_norm": 0.0885949656367302,
"learning_rate": 0.00019178676709771258,
"loss": 0.4648,
"step": 96
},
{
"epoch": 0.58,
"grad_norm": 0.09253839403390884,
"learning_rate": 0.00019159738743988825,
"loss": 0.459,
"step": 97
},
{
"epoch": 0.58,
"grad_norm": 0.08571318536996841,
"learning_rate": 0.00019140594493971674,
"loss": 0.4797,
"step": 98
},
{
"epoch": 0.59,
"grad_norm": 0.07787954807281494,
"learning_rate": 0.0001912124439086278,
"loss": 0.4547,
"step": 99
},
{
"epoch": 0.59,
"grad_norm": 0.08822935819625854,
"learning_rate": 0.00019101688870441078,
"loss": 0.4511,
"step": 100
},
{
"epoch": 0.6,
"grad_norm": 0.08409956842660904,
"learning_rate": 0.0001908192837311166,
"loss": 0.4631,
"step": 101
},
{
"epoch": 0.61,
"grad_norm": 0.08279416710138321,
"learning_rate": 0.00019061963343895846,
"loss": 0.4696,
"step": 102
},
{
"epoch": 0.61,
"grad_norm": 0.09696204960346222,
"learning_rate": 0.00019041794232421176,
"loss": 0.4862,
"step": 103
},
{
"epoch": 0.62,
"grad_norm": 0.08494329452514648,
"learning_rate": 0.00019021421492911272,
"loss": 0.4557,
"step": 104
},
{
"epoch": 0.62,
"grad_norm": 0.08702557533979416,
"learning_rate": 0.00019000845584175616,
"loss": 0.4693,
"step": 105
},
{
"epoch": 0.63,
"grad_norm": 0.09048158675432205,
"learning_rate": 0.00018980066969599216,
"loss": 0.4714,
"step": 106
},
{
"epoch": 0.64,
"grad_norm": 0.08462114632129669,
"learning_rate": 0.0001895908611713216,
"loss": 0.4632,
"step": 107
},
{
"epoch": 0.64,
"grad_norm": 0.09956546127796173,
"learning_rate": 0.00018937903499279102,
"loss": 0.4638,
"step": 108
},
{
"epoch": 0.65,
"grad_norm": 0.08630617707967758,
"learning_rate": 0.00018916519593088584,
"loss": 0.4499,
"step": 109
},
{
"epoch": 0.65,
"grad_norm": 0.08207620680332184,
"learning_rate": 0.0001889493488014233,
"loss": 0.4603,
"step": 110
},
{
"epoch": 0.66,
"grad_norm": 0.08473565429449081,
"learning_rate": 0.00018873149846544376,
"loss": 0.4571,
"step": 111
},
{
"epoch": 0.66,
"grad_norm": 0.08818928152322769,
"learning_rate": 0.00018851164982910135,
"loss": 0.4489,
"step": 112
},
{
"epoch": 0.67,
"grad_norm": 0.08116699010133743,
"learning_rate": 0.00018828980784355338,
"loss": 0.4578,
"step": 113
},
{
"epoch": 0.68,
"grad_norm": 0.08832226693630219,
"learning_rate": 0.00018806597750484897,
"loss": 0.4719,
"step": 114
},
{
"epoch": 0.68,
"grad_norm": 0.08624406903982162,
"learning_rate": 0.0001878401638538163,
"loss": 0.4628,
"step": 115
},
{
"epoch": 0.69,
"grad_norm": 0.08936543017625809,
"learning_rate": 0.00018761237197594945,
"loss": 0.4533,
"step": 116
},
{
"epoch": 0.69,
"grad_norm": 0.08579661697149277,
"learning_rate": 0.00018738260700129354,
"loss": 0.4772,
"step": 117
},
{
"epoch": 0.7,
"grad_norm": 0.08271288126707077,
"learning_rate": 0.0001871508741043293,
"loss": 0.4773,
"step": 118
},
{
"epoch": 0.71,
"grad_norm": 0.08224964886903763,
"learning_rate": 0.0001869171785038566,
"loss": 0.4635,
"step": 119
},
{
"epoch": 0.71,
"grad_norm": 0.087012380361557,
"learning_rate": 0.00018668152546287686,
"loss": 0.4559,
"step": 120
},
{
"epoch": 0.72,
"grad_norm": 0.08352699875831604,
"learning_rate": 0.00018644392028847458,
"loss": 0.4485,
"step": 121
},
{
"epoch": 0.72,
"grad_norm": 0.08281444013118744,
"learning_rate": 0.00018620436833169772,
"loss": 0.4393,
"step": 122
},
{
"epoch": 0.73,
"grad_norm": 0.08376545459032059,
"learning_rate": 0.00018596287498743732,
"loss": 0.4525,
"step": 123
},
{
"epoch": 0.74,
"grad_norm": 0.08526434749364853,
"learning_rate": 0.0001857194456943058,
"loss": 0.4456,
"step": 124
},
{
"epoch": 0.74,
"grad_norm": 0.08151934295892715,
"learning_rate": 0.0001854740859345148,
"loss": 0.4576,
"step": 125
},
{
"epoch": 0.75,
"grad_norm": 0.08793777972459793,
"learning_rate": 0.0001852268012337514,
"loss": 0.4431,
"step": 126
},
{
"epoch": 0.75,
"eval_loss": 0.5042669773101807,
"eval_runtime": 21.4592,
"eval_samples_per_second": 46.274,
"eval_steps_per_second": 11.603,
"step": 126
},
{
"epoch": 0.75,
"grad_norm": 0.08135095983743668,
"learning_rate": 0.00018497759716105377,
"loss": 0.4384,
"step": 127
},
{
"epoch": 0.76,
"grad_norm": 0.0917576476931572,
"learning_rate": 0.0001847264793286859,
"loss": 0.4687,
"step": 128
},
{
"epoch": 0.77,
"grad_norm": 0.08832691609859467,
"learning_rate": 0.00018447345339201102,
"loss": 0.4386,
"step": 129
},
{
"epoch": 0.77,
"grad_norm": 0.08340886980295181,
"learning_rate": 0.00018421852504936438,
"loss": 0.4512,
"step": 130
},
{
"epoch": 0.78,
"grad_norm": 0.08589499443769455,
"learning_rate": 0.00018396170004192475,
"loss": 0.4387,
"step": 131
},
{
"epoch": 0.78,
"grad_norm": 0.08753557503223419,
"learning_rate": 0.00018370298415358526,
"loss": 0.4615,
"step": 132
},
{
"epoch": 0.79,
"grad_norm": 0.08406232297420502,
"learning_rate": 0.00018344238321082315,
"loss": 0.4465,
"step": 133
},
{
"epoch": 0.8,
"grad_norm": 0.08514856547117233,
"learning_rate": 0.0001831799030825685,
"loss": 0.4516,
"step": 134
},
{
"epoch": 0.8,
"grad_norm": 0.09259331226348877,
"learning_rate": 0.000182915549680072,
"loss": 0.4387,
"step": 135
},
{
"epoch": 0.81,
"grad_norm": 0.08862275630235672,
"learning_rate": 0.00018264932895677193,
"loss": 0.4434,
"step": 136
},
{
"epoch": 0.81,
"grad_norm": 0.08515379577875137,
"learning_rate": 0.0001823812469081601,
"loss": 0.4425,
"step": 137
},
{
"epoch": 0.82,
"grad_norm": 0.09041007608175278,
"learning_rate": 0.00018211130957164668,
"loss": 0.4607,
"step": 138
},
{
"epoch": 0.82,
"grad_norm": 0.08312032371759415,
"learning_rate": 0.0001818395230264244,
"loss": 0.442,
"step": 139
},
{
"epoch": 0.83,
"grad_norm": 0.08981412649154663,
"learning_rate": 0.00018156589339333152,
"loss": 0.4608,
"step": 140
},
{
"epoch": 0.84,
"grad_norm": 0.08991118520498276,
"learning_rate": 0.00018129042683471402,
"loss": 0.451,
"step": 141
},
{
"epoch": 0.84,
"grad_norm": 0.08628728240728378,
"learning_rate": 0.00018101312955428692,
"loss": 0.4453,
"step": 142
},
{
"epoch": 0.85,
"grad_norm": 0.08730859309434891,
"learning_rate": 0.00018073400779699435,
"loss": 0.4485,
"step": 143
},
{
"epoch": 0.85,
"grad_norm": 0.08489865809679031,
"learning_rate": 0.0001804530678488691,
"loss": 0.4592,
"step": 144
},
{
"epoch": 0.86,
"grad_norm": 0.08439410477876663,
"learning_rate": 0.00018017031603689102,
"loss": 0.4326,
"step": 145
},
{
"epoch": 0.87,
"grad_norm": 0.09346488118171692,
"learning_rate": 0.0001798857587288445,
"loss": 0.4484,
"step": 146
},
{
"epoch": 0.87,
"grad_norm": 0.09130821377038956,
"learning_rate": 0.00017959940233317498,
"loss": 0.4502,
"step": 147
},
{
"epoch": 0.88,
"grad_norm": 0.08846256881952286,
"learning_rate": 0.0001793112532988448,
"loss": 0.4322,
"step": 148
},
{
"epoch": 0.88,
"grad_norm": 0.09061886370182037,
"learning_rate": 0.00017902131811518786,
"loss": 0.4437,
"step": 149
},
{
"epoch": 0.89,
"grad_norm": 0.09259927272796631,
"learning_rate": 0.00017872960331176345,
"loss": 0.4545,
"step": 150
},
{
"epoch": 0.9,
"grad_norm": 0.09632189571857452,
"learning_rate": 0.00017843611545820926,
"loss": 0.4515,
"step": 151
},
{
"epoch": 0.9,
"grad_norm": 0.08714065700769424,
"learning_rate": 0.00017814086116409348,
"loss": 0.4602,
"step": 152
},
{
"epoch": 0.91,
"grad_norm": 0.09537078440189362,
"learning_rate": 0.00017784384707876576,
"loss": 0.4482,
"step": 153
},
{
"epoch": 0.91,
"grad_norm": 0.09175322949886322,
"learning_rate": 0.00017754507989120764,
"loss": 0.4681,
"step": 154
},
{
"epoch": 0.92,
"grad_norm": 0.08962789177894592,
"learning_rate": 0.00017724456632988187,
"loss": 0.4304,
"step": 155
},
{
"epoch": 0.93,
"grad_norm": 0.09643880277872086,
"learning_rate": 0.00017694231316258077,
"loss": 0.4532,
"step": 156
},
{
"epoch": 0.93,
"grad_norm": 0.08335065096616745,
"learning_rate": 0.00017663832719627402,
"loss": 0.4504,
"step": 157
},
{
"epoch": 0.94,
"grad_norm": 0.087184838950634,
"learning_rate": 0.0001763326152769551,
"loss": 0.4752,
"step": 158
},
{
"epoch": 0.94,
"grad_norm": 0.08858635276556015,
"learning_rate": 0.0001760251842894874,
"loss": 0.4413,
"step": 159
},
{
"epoch": 0.95,
"grad_norm": 0.08101391792297363,
"learning_rate": 0.00017571604115744892,
"loss": 0.4465,
"step": 160
},
{
"epoch": 0.96,
"grad_norm": 0.08623132854700089,
"learning_rate": 0.0001754051928429765,
"loss": 0.4673,
"step": 161
},
{
"epoch": 0.96,
"grad_norm": 0.0922100692987442,
"learning_rate": 0.00017509264634660895,
"loss": 0.4587,
"step": 162
},
{
"epoch": 0.97,
"grad_norm": 0.08243449032306671,
"learning_rate": 0.00017477840870712945,
"loss": 0.4368,
"step": 163
},
{
"epoch": 0.97,
"grad_norm": 0.0845554992556572,
"learning_rate": 0.00017446248700140693,
"loss": 0.4209,
"step": 164
},
{
"epoch": 0.98,
"grad_norm": 0.08277452737092972,
"learning_rate": 0.00017414488834423687,
"loss": 0.4397,
"step": 165
},
{
"epoch": 0.99,
"grad_norm": 0.0826331302523613,
"learning_rate": 0.00017382561988818086,
"loss": 0.4333,
"step": 166
},
{
"epoch": 0.99,
"grad_norm": 0.08441821485757828,
"learning_rate": 0.0001735046888234057,
"loss": 0.4496,
"step": 167
},
{
"epoch": 1.0,
"grad_norm": 0.08665426075458527,
"learning_rate": 0.00017318210237752136,
"loss": 0.4523,
"step": 168
},
{
"epoch": 1.0,
"eval_loss": 0.4984985589981079,
"eval_runtime": 21.4662,
"eval_samples_per_second": 46.259,
"eval_steps_per_second": 11.6,
"step": 168
},
{
"epoch": 1.0,
"grad_norm": 0.08923573791980743,
"learning_rate": 0.00017285786781541824,
"loss": 0.4735,
"step": 169
},
{
"epoch": 1.01,
"grad_norm": 0.08524107187986374,
"learning_rate": 0.00017253199243910357,
"loss": 0.4323,
"step": 170
},
{
"epoch": 1.01,
"grad_norm": 0.09072479605674744,
"learning_rate": 0.00017220448358753692,
"loss": 0.4617,
"step": 171
},
{
"epoch": 1.01,
"grad_norm": 0.08986588567495346,
"learning_rate": 0.0001718753486364651,
"loss": 0.4264,
"step": 172
},
{
"epoch": 1.01,
"grad_norm": 0.09549989551305771,
"learning_rate": 0.00017154459499825564,
"loss": 0.4042,
"step": 173
},
{
"epoch": 1.02,
"grad_norm": 0.09217043220996857,
"learning_rate": 0.0001712122301217304,
"loss": 0.4028,
"step": 174
},
{
"epoch": 1.02,
"grad_norm": 0.10525793582201004,
"learning_rate": 0.00017087826149199734,
"loss": 0.4176,
"step": 175
},
{
"epoch": 1.03,
"grad_norm": 0.10329723358154297,
"learning_rate": 0.00017054269663028233,
"loss": 0.4062,
"step": 176
},
{
"epoch": 1.04,
"grad_norm": 0.10040964931249619,
"learning_rate": 0.00017020554309375946,
"loss": 0.4222,
"step": 177
},
{
"epoch": 1.04,
"grad_norm": 0.10112589597702026,
"learning_rate": 0.00016986680847538106,
"loss": 0.4058,
"step": 178
},
{
"epoch": 1.05,
"grad_norm": 0.09155958890914917,
"learning_rate": 0.0001695265004037065,
"loss": 0.4045,
"step": 179
},
{
"epoch": 1.05,
"grad_norm": 0.10278405249118805,
"learning_rate": 0.00016918462654273063,
"loss": 0.4294,
"step": 180
},
{
"epoch": 1.06,
"grad_norm": 0.10295873880386353,
"learning_rate": 0.00016884119459171105,
"loss": 0.4025,
"step": 181
},
{
"epoch": 1.07,
"grad_norm": 0.09759877622127533,
"learning_rate": 0.0001684962122849946,
"loss": 0.4227,
"step": 182
},
{
"epoch": 1.07,
"grad_norm": 0.09612429887056351,
"learning_rate": 0.00016814968739184343,
"loss": 0.3991,
"step": 183
},
{
"epoch": 1.08,
"grad_norm": 0.09644313901662827,
"learning_rate": 0.00016780162771625986,
"loss": 0.4271,
"step": 184
},
{
"epoch": 1.08,
"grad_norm": 0.09357411414384842,
"learning_rate": 0.00016745204109681064,
"loss": 0.4017,
"step": 185
},
{
"epoch": 1.09,
"grad_norm": 0.09177077561616898,
"learning_rate": 0.00016710093540645056,
"loss": 0.386,
"step": 186
},
{
"epoch": 1.09,
"grad_norm": 0.10068117827177048,
"learning_rate": 0.00016674831855234486,
"loss": 0.4127,
"step": 187
},
{
"epoch": 1.1,
"grad_norm": 0.09634707123041153,
"learning_rate": 0.00016639419847569147,
"loss": 0.403,
"step": 188
},
{
"epoch": 1.11,
"grad_norm": 0.09533964842557907,
"learning_rate": 0.00016603858315154195,
"loss": 0.4004,
"step": 189
},
{
"epoch": 1.11,
"grad_norm": 0.09449424594640732,
"learning_rate": 0.00016568148058862197,
"loss": 0.4136,
"step": 190
},
{
"epoch": 1.12,
"grad_norm": 0.09432344883680344,
"learning_rate": 0.00016532289882915103,
"loss": 0.403,
"step": 191
},
{
"epoch": 1.12,
"grad_norm": 0.10496091097593307,
"learning_rate": 0.00016496284594866113,
"loss": 0.423,
"step": 192
},
{
"epoch": 1.13,
"grad_norm": 0.0957692563533783,
"learning_rate": 0.00016460133005581512,
"loss": 0.4032,
"step": 193
},
{
"epoch": 1.14,
"grad_norm": 0.10207226127386093,
"learning_rate": 0.0001642383592922239,
"loss": 0.4068,
"step": 194
},
{
"epoch": 1.14,
"grad_norm": 0.10685818642377853,
"learning_rate": 0.00016387394183226328,
"loss": 0.4194,
"step": 195
},
{
"epoch": 1.15,
"grad_norm": 0.09566125273704529,
"learning_rate": 0.00016350808588288965,
"loss": 0.3717,
"step": 196
},
{
"epoch": 1.15,
"grad_norm": 0.10426433384418488,
"learning_rate": 0.0001631407996834553,
"loss": 0.4197,
"step": 197
},
{
"epoch": 1.16,
"grad_norm": 0.09626364707946777,
"learning_rate": 0.00016277209150552285,
"loss": 0.3965,
"step": 198
},
{
"epoch": 1.17,
"grad_norm": 0.10416010022163391,
"learning_rate": 0.000162401969652679,
"loss": 0.4042,
"step": 199
},
{
"epoch": 1.17,
"grad_norm": 0.11229964345693588,
"learning_rate": 0.0001620304424603474,
"loss": 0.4039,
"step": 200
},
{
"epoch": 1.18,
"grad_norm": 0.09705965220928192,
"learning_rate": 0.00016165751829560102,
"loss": 0.4111,
"step": 201
},
{
"epoch": 1.18,
"grad_norm": 0.09751327335834503,
"learning_rate": 0.00016128320555697364,
"loss": 0.4007,
"step": 202
},
{
"epoch": 1.19,
"grad_norm": 0.09729477018117905,
"learning_rate": 0.000160907512674271,
"loss": 0.4025,
"step": 203
},
{
"epoch": 1.2,
"grad_norm": 0.09977483749389648,
"learning_rate": 0.00016053044810838046,
"loss": 0.4143,
"step": 204
},
{
"epoch": 1.2,
"grad_norm": 0.10283766686916351,
"learning_rate": 0.0001601520203510809,
"loss": 0.4093,
"step": 205
},
{
"epoch": 1.21,
"grad_norm": 0.10398785024881363,
"learning_rate": 0.00015977223792485118,
"loss": 0.409,
"step": 206
},
{
"epoch": 1.21,
"grad_norm": 0.10487958043813705,
"learning_rate": 0.0001593911093826784,
"loss": 0.4102,
"step": 207
},
{
"epoch": 1.22,
"grad_norm": 0.10372807830572128,
"learning_rate": 0.00015900864330786518,
"loss": 0.4082,
"step": 208
},
{
"epoch": 1.23,
"grad_norm": 0.09674819558858871,
"learning_rate": 0.00015862484831383644,
"loss": 0.4093,
"step": 209
},
{
"epoch": 1.23,
"grad_norm": 0.09968870133161545,
"learning_rate": 0.00015823973304394525,
"loss": 0.4237,
"step": 210
},
{
"epoch": 1.23,
"eval_loss": 0.4985295832157135,
"eval_runtime": 21.4597,
"eval_samples_per_second": 46.273,
"eval_steps_per_second": 11.603,
"step": 210
},
{
"epoch": 1.24,
"grad_norm": 0.10222364962100983,
"learning_rate": 0.00015785330617127842,
"loss": 0.4032,
"step": 211
},
{
"epoch": 1.24,
"grad_norm": 0.09916388243436813,
"learning_rate": 0.00015746557639846097,
"loss": 0.4113,
"step": 212
},
{
"epoch": 1.25,
"grad_norm": 0.1033184677362442,
"learning_rate": 0.0001570765524574602,
"loss": 0.3956,
"step": 213
},
{
"epoch": 1.26,
"grad_norm": 0.11059914529323578,
"learning_rate": 0.00015668624310938913,
"loss": 0.404,
"step": 214
},
{
"epoch": 1.26,
"grad_norm": 0.0982404425740242,
"learning_rate": 0.00015629465714430904,
"loss": 0.3875,
"step": 215
},
{
"epoch": 1.27,
"grad_norm": 0.1017359048128128,
"learning_rate": 0.0001559018033810316,
"loss": 0.4033,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 0.1090175062417984,
"learning_rate": 0.00015550769066692034,
"loss": 0.4084,
"step": 217
},
{
"epoch": 1.28,
"grad_norm": 0.1041378527879715,
"learning_rate": 0.00015511232787769123,
"loss": 0.4095,
"step": 218
},
{
"epoch": 1.28,
"grad_norm": 0.0929458811879158,
"learning_rate": 0.00015471572391721284,
"loss": 0.3892,
"step": 219
},
{
"epoch": 1.29,
"grad_norm": 0.10203150659799576,
"learning_rate": 0.00015431788771730597,
"loss": 0.3967,
"step": 220
},
{
"epoch": 1.3,
"grad_norm": 0.10682693123817444,
"learning_rate": 0.00015391882823754228,
"loss": 0.4164,
"step": 221
},
{
"epoch": 1.3,
"grad_norm": 0.10250475257635117,
"learning_rate": 0.00015351855446504268,
"loss": 0.402,
"step": 222
},
{
"epoch": 1.31,
"grad_norm": 0.09453174471855164,
"learning_rate": 0.00015311707541427487,
"loss": 0.3927,
"step": 223
},
{
"epoch": 1.31,
"grad_norm": 0.09379514306783676,
"learning_rate": 0.00015271440012685025,
"loss": 0.3825,
"step": 224
},
{
"epoch": 1.32,
"grad_norm": 0.09439584612846375,
"learning_rate": 0.00015231053767132045,
"loss": 0.3798,
"step": 225
},
{
"epoch": 1.33,
"grad_norm": 0.10218024253845215,
"learning_rate": 0.00015190549714297303,
"loss": 0.3909,
"step": 226
},
{
"epoch": 1.33,
"grad_norm": 0.10314662754535675,
"learning_rate": 0.00015149928766362657,
"loss": 0.4075,
"step": 227
},
{
"epoch": 1.34,
"grad_norm": 0.10139516741037369,
"learning_rate": 0.00015109191838142536,
"loss": 0.4125,
"step": 228
},
{
"epoch": 1.34,
"grad_norm": 0.10410083085298538,
"learning_rate": 0.0001506833984706333,
"loss": 0.3998,
"step": 229
},
{
"epoch": 1.35,
"grad_norm": 0.11478332430124283,
"learning_rate": 0.00015027373713142735,
"loss": 0.4181,
"step": 230
},
{
"epoch": 1.36,
"grad_norm": 0.10138574987649918,
"learning_rate": 0.00014986294358969028,
"loss": 0.4152,
"step": 231
},
{
"epoch": 1.36,
"grad_norm": 0.0989016592502594,
"learning_rate": 0.0001494510270968029,
"loss": 0.399,
"step": 232
},
{
"epoch": 1.37,
"grad_norm": 0.10673234611749649,
"learning_rate": 0.00014903799692943574,
"loss": 0.4246,
"step": 233
},
{
"epoch": 1.37,
"grad_norm": 0.10326199978590012,
"learning_rate": 0.00014862386238934016,
"loss": 0.4033,
"step": 234
},
{
"epoch": 1.38,
"grad_norm": 0.09947673231363297,
"learning_rate": 0.00014820863280313873,
"loss": 0.3886,
"step": 235
},
{
"epoch": 1.39,
"grad_norm": 0.09778755158185959,
"learning_rate": 0.00014779231752211548,
"loss": 0.3934,
"step": 236
},
{
"epoch": 1.39,
"grad_norm": 0.09947831183671951,
"learning_rate": 0.0001473749259220048,
"loss": 0.4147,
"step": 237
},
{
"epoch": 1.4,
"grad_norm": 0.10698696970939636,
"learning_rate": 0.00014695646740278085,
"loss": 0.3773,
"step": 238
},
{
"epoch": 1.4,
"grad_norm": 0.09899724274873734,
"learning_rate": 0.00014653695138844557,
"loss": 0.4005,
"step": 239
},
{
"epoch": 1.41,
"grad_norm": 0.10226688534021378,
"learning_rate": 0.0001461163873268164,
"loss": 0.4009,
"step": 240
},
{
"epoch": 1.42,
"grad_norm": 0.10390684753656387,
"learning_rate": 0.0001456947846893137,
"loss": 0.4129,
"step": 241
},
{
"epoch": 1.42,
"grad_norm": 0.09909753501415253,
"learning_rate": 0.0001452721529707473,
"loss": 0.3773,
"step": 242
},
{
"epoch": 1.43,
"grad_norm": 0.09814441949129105,
"learning_rate": 0.00014484850168910263,
"loss": 0.3976,
"step": 243
},
{
"epoch": 1.43,
"grad_norm": 0.10027995705604553,
"learning_rate": 0.00014442384038532665,
"loss": 0.3951,
"step": 244
},
{
"epoch": 1.44,
"grad_norm": 0.0988469123840332,
"learning_rate": 0.00014399817862311256,
"loss": 0.3734,
"step": 245
},
{
"epoch": 1.45,
"grad_norm": 0.09742960333824158,
"learning_rate": 0.00014357152598868476,
"loss": 0.3826,
"step": 246
},
{
"epoch": 1.45,
"grad_norm": 0.1014707013964653,
"learning_rate": 0.00014314389209058286,
"loss": 0.4082,
"step": 247
},
{
"epoch": 1.46,
"grad_norm": 0.10080685466527939,
"learning_rate": 0.00014271528655944522,
"loss": 0.4104,
"step": 248
},
{
"epoch": 1.46,
"grad_norm": 0.1009359136223793,
"learning_rate": 0.0001422857190477921,
"loss": 0.3912,
"step": 249
},
{
"epoch": 1.47,
"grad_norm": 0.0964767187833786,
"learning_rate": 0.0001418551992298083,
"loss": 0.3731,
"step": 250
},
{
"epoch": 1.47,
"grad_norm": 0.09848513454198837,
"learning_rate": 0.0001414237368011253,
"loss": 0.3891,
"step": 251
},
{
"epoch": 1.48,
"grad_norm": 0.10232508182525635,
"learning_rate": 0.00014099134147860286,
"loss": 0.4002,
"step": 252
},
{
"epoch": 1.48,
"eval_loss": 0.4975546896457672,
"eval_runtime": 21.466,
"eval_samples_per_second": 46.259,
"eval_steps_per_second": 11.6,
"step": 252
},
{
"epoch": 1.49,
"grad_norm": 0.10712958872318268,
"learning_rate": 0.00014055802300011027,
"loss": 0.41,
"step": 253
},
{
"epoch": 1.49,
"grad_norm": 0.10100317001342773,
"learning_rate": 0.0001401237911243069,
"loss": 0.3954,
"step": 254
},
{
"epoch": 1.5,
"grad_norm": 0.09532604366540909,
"learning_rate": 0.00013968865563042255,
"loss": 0.4111,
"step": 255
},
{
"epoch": 1.5,
"grad_norm": 0.10244431346654892,
"learning_rate": 0.00013925262631803723,
"loss": 0.4,
"step": 256
},
{
"epoch": 1.51,
"grad_norm": 0.10212967544794083,
"learning_rate": 0.00013881571300686037,
"loss": 0.3996,
"step": 257
},
{
"epoch": 1.52,
"grad_norm": 0.09606331586837769,
"learning_rate": 0.0001383779255365097,
"loss": 0.3852,
"step": 258
},
{
"epoch": 1.52,
"grad_norm": 0.10002104192972183,
"learning_rate": 0.00013793927376628976,
"loss": 0.4126,
"step": 259
},
{
"epoch": 1.53,
"grad_norm": 0.10387251526117325,
"learning_rate": 0.00013749976757496967,
"loss": 0.4146,
"step": 260
},
{
"epoch": 1.53,
"grad_norm": 0.09998749941587448,
"learning_rate": 0.00013705941686056086,
"loss": 0.4143,
"step": 261
},
{
"epoch": 1.54,
"grad_norm": 0.09770024567842484,
"learning_rate": 0.00013661823154009395,
"loss": 0.3719,
"step": 262
},
{
"epoch": 1.55,
"grad_norm": 0.10180012881755829,
"learning_rate": 0.00013617622154939564,
"loss": 0.408,
"step": 263
},
{
"epoch": 1.55,
"grad_norm": 0.10063595324754715,
"learning_rate": 0.00013573339684286472,
"loss": 0.4007,
"step": 264
},
{
"epoch": 1.56,
"grad_norm": 0.10153420269489288,
"learning_rate": 0.00013528976739324807,
"loss": 0.4054,
"step": 265
},
{
"epoch": 1.56,
"grad_norm": 0.10569385439157486,
"learning_rate": 0.0001348453431914159,
"loss": 0.4031,
"step": 266
},
{
"epoch": 1.57,
"grad_norm": 0.10000584274530411,
"learning_rate": 0.00013440013424613698,
"loss": 0.4018,
"step": 267
},
{
"epoch": 1.58,
"grad_norm": 0.09948629140853882,
"learning_rate": 0.00013395415058385296,
"loss": 0.3997,
"step": 268
},
{
"epoch": 1.58,
"grad_norm": 0.10179516673088074,
"learning_rate": 0.00013350740224845278,
"loss": 0.3872,
"step": 269
},
{
"epoch": 1.59,
"grad_norm": 0.0989665687084198,
"learning_rate": 0.00013305989930104638,
"loss": 0.3672,
"step": 270
},
{
"epoch": 1.59,
"grad_norm": 0.09988453984260559,
"learning_rate": 0.00013261165181973814,
"loss": 0.3978,
"step": 271
},
{
"epoch": 1.6,
"grad_norm": 0.09958979487419128,
"learning_rate": 0.00013216266989939988,
"loss": 0.3793,
"step": 272
},
{
"epoch": 1.61,
"grad_norm": 0.10080169141292572,
"learning_rate": 0.0001317129636514435,
"loss": 0.4012,
"step": 273
},
{
"epoch": 1.61,
"grad_norm": 0.10148239880800247,
"learning_rate": 0.00013126254320359343,
"loss": 0.3904,
"step": 274
},
{
"epoch": 1.62,
"grad_norm": 0.10728135704994202,
"learning_rate": 0.00013081141869965835,
"loss": 0.393,
"step": 275
},
{
"epoch": 1.62,
"grad_norm": 0.09917322546243668,
"learning_rate": 0.00013035960029930278,
"loss": 0.3725,
"step": 276
},
{
"epoch": 1.63,
"grad_norm": 0.10163102298974991,
"learning_rate": 0.00012990709817781837,
"loss": 0.406,
"step": 277
},
{
"epoch": 1.64,
"grad_norm": 0.09815432131290436,
"learning_rate": 0.00012945392252589465,
"loss": 0.3995,
"step": 278
},
{
"epoch": 1.64,
"grad_norm": 0.09806889295578003,
"learning_rate": 0.0001290000835493896,
"loss": 0.3952,
"step": 279
},
{
"epoch": 1.65,
"grad_norm": 0.09826052933931351,
"learning_rate": 0.0001285455914690997,
"loss": 0.402,
"step": 280
},
{
"epoch": 1.65,
"grad_norm": 0.09949176013469696,
"learning_rate": 0.0001280904565205299,
"loss": 0.3953,
"step": 281
},
{
"epoch": 1.66,
"grad_norm": 0.10278761386871338,
"learning_rate": 0.00012763468895366303,
"loss": 0.4091,
"step": 282
},
{
"epoch": 1.66,
"grad_norm": 0.10104485601186752,
"learning_rate": 0.0001271782990327289,
"loss": 0.3807,
"step": 283
},
{
"epoch": 1.67,
"grad_norm": 0.10103065520524979,
"learning_rate": 0.0001267212970359732,
"loss": 0.4136,
"step": 284
},
{
"epoch": 1.68,
"grad_norm": 0.10056735575199127,
"learning_rate": 0.0001262636932554261,
"loss": 0.3834,
"step": 285
},
{
"epoch": 1.68,
"grad_norm": 0.10619843006134033,
"learning_rate": 0.00012580549799667034,
"loss": 0.3926,
"step": 286
},
{
"epoch": 1.69,
"grad_norm": 0.10867036134004593,
"learning_rate": 0.00012534672157860928,
"loss": 0.4102,
"step": 287
},
{
"epoch": 1.69,
"grad_norm": 0.10176358371973038,
"learning_rate": 0.00012488737433323426,
"loss": 0.4013,
"step": 288
},
{
"epoch": 1.7,
"grad_norm": 0.09872467070817947,
"learning_rate": 0.00012442746660539227,
"loss": 0.3859,
"step": 289
},
{
"epoch": 1.71,
"grad_norm": 0.10109713673591614,
"learning_rate": 0.00012396700875255264,
"loss": 0.3779,
"step": 290
},
{
"epoch": 1.71,
"grad_norm": 0.10859864950180054,
"learning_rate": 0.00012350601114457396,
"loss": 0.4023,
"step": 291
},
{
"epoch": 1.72,
"grad_norm": 0.10092321038246155,
"learning_rate": 0.00012304448416347065,
"loss": 0.3998,
"step": 292
},
{
"epoch": 1.72,
"grad_norm": 0.10352278500795364,
"learning_rate": 0.0001225824382031789,
"loss": 0.4067,
"step": 293
},
{
"epoch": 1.73,
"grad_norm": 0.1029689610004425,
"learning_rate": 0.0001221198836693226,
"loss": 0.3656,
"step": 294
},
{
"epoch": 1.73,
"eval_loss": 0.4955105185508728,
"eval_runtime": 21.4362,
"eval_samples_per_second": 46.324,
"eval_steps_per_second": 11.616,
"step": 294
},
{
"epoch": 1.74,
"grad_norm": 0.10078923404216766,
"learning_rate": 0.00012165683097897931,
"loss": 0.4035,
"step": 295
},
{
"epoch": 1.74,
"grad_norm": 0.1020880788564682,
"learning_rate": 0.00012119329056044532,
"loss": 0.393,
"step": 296
},
{
"epoch": 1.75,
"grad_norm": 0.10287690162658691,
"learning_rate": 0.00012072927285300098,
"loss": 0.4005,
"step": 297
},
{
"epoch": 1.75,
"grad_norm": 0.10361482948064804,
"learning_rate": 0.00012026478830667551,
"loss": 0.3885,
"step": 298
},
{
"epoch": 1.76,
"grad_norm": 0.10416186600923538,
"learning_rate": 0.00011979984738201171,
"loss": 0.3997,
"step": 299
},
{
"epoch": 1.77,
"grad_norm": 0.10558196157217026,
"learning_rate": 0.00011933446054983035,
"loss": 0.4082,
"step": 300
},
{
"epoch": 1.77,
"grad_norm": 0.10211660712957382,
"learning_rate": 0.00011886863829099441,
"loss": 0.3798,
"step": 301
},
{
"epoch": 1.78,
"grad_norm": 0.10305824875831604,
"learning_rate": 0.00011840239109617302,
"loss": 0.3898,
"step": 302
},
{
"epoch": 1.78,
"grad_norm": 0.11154858022928238,
"learning_rate": 0.0001179357294656051,
"loss": 0.3921,
"step": 303
},
{
"epoch": 1.79,
"grad_norm": 0.1013292595744133,
"learning_rate": 0.00011746866390886305,
"loss": 0.3808,
"step": 304
},
{
"epoch": 1.8,
"grad_norm": 0.10282581299543381,
"learning_rate": 0.00011700120494461595,
"loss": 0.3811,
"step": 305
},
{
"epoch": 1.8,
"grad_norm": 0.10098174214363098,
"learning_rate": 0.0001165333631003928,
"loss": 0.3822,
"step": 306
},
{
"epoch": 1.81,
"grad_norm": 0.10231437534093857,
"learning_rate": 0.00011606514891234526,
"loss": 0.38,
"step": 307
},
{
"epoch": 1.81,
"grad_norm": 0.10368547588586807,
"learning_rate": 0.00011559657292501042,
"loss": 0.3863,
"step": 308
},
{
"epoch": 1.82,
"grad_norm": 0.10524257272481918,
"learning_rate": 0.00011512764569107351,
"loss": 0.4024,
"step": 309
},
{
"epoch": 1.82,
"grad_norm": 0.10808374732732773,
"learning_rate": 0.00011465837777113,
"loss": 0.4021,
"step": 310
},
{
"epoch": 1.83,
"grad_norm": 0.10130346566438675,
"learning_rate": 0.00011418877973344781,
"loss": 0.3857,
"step": 311
},
{
"epoch": 1.84,
"grad_norm": 0.10113856196403503,
"learning_rate": 0.00011371886215372951,
"loss": 0.4028,
"step": 312
},
{
"epoch": 1.84,
"grad_norm": 0.10313602536916733,
"learning_rate": 0.00011324863561487383,
"loss": 0.3897,
"step": 313
},
{
"epoch": 1.85,
"grad_norm": 0.0981384888291359,
"learning_rate": 0.00011277811070673765,
"loss": 0.3798,
"step": 314
},
{
"epoch": 1.85,
"grad_norm": 0.10432131588459015,
"learning_rate": 0.00011230729802589726,
"loss": 0.3941,
"step": 315
},
{
"epoch": 1.86,
"grad_norm": 0.09853876382112503,
"learning_rate": 0.00011183620817540986,
"loss": 0.3799,
"step": 316
},
{
"epoch": 1.87,
"grad_norm": 0.10610882192850113,
"learning_rate": 0.00011136485176457459,
"loss": 0.4034,
"step": 317
},
{
"epoch": 1.87,
"grad_norm": 0.09782709181308746,
"learning_rate": 0.00011089323940869392,
"loss": 0.3725,
"step": 318
},
{
"epoch": 1.88,
"grad_norm": 0.10068295896053314,
"learning_rate": 0.0001104213817288343,
"loss": 0.3721,
"step": 319
},
{
"epoch": 1.88,
"grad_norm": 0.10485327988862991,
"learning_rate": 0.00010994928935158702,
"loss": 0.3832,
"step": 320
},
{
"epoch": 1.89,
"grad_norm": 0.10914643853902817,
"learning_rate": 0.00010947697290882903,
"loss": 0.4039,
"step": 321
},
{
"epoch": 1.9,
"grad_norm": 0.10210110992193222,
"learning_rate": 0.00010900444303748332,
"loss": 0.3954,
"step": 322
},
{
"epoch": 1.9,
"grad_norm": 0.09887547791004181,
"learning_rate": 0.00010853171037927951,
"loss": 0.3711,
"step": 323
},
{
"epoch": 1.91,
"grad_norm": 0.1009160652756691,
"learning_rate": 0.0001080587855805141,
"loss": 0.3917,
"step": 324
},
{
"epoch": 1.91,
"grad_norm": 0.10089701414108276,
"learning_rate": 0.00010758567929181074,
"loss": 0.388,
"step": 325
},
{
"epoch": 1.92,
"grad_norm": 0.10800614953041077,
"learning_rate": 0.00010711240216788036,
"loss": 0.4067,
"step": 326
},
{
"epoch": 1.93,
"grad_norm": 0.10289803147315979,
"learning_rate": 0.00010663896486728133,
"loss": 0.3965,
"step": 327
},
{
"epoch": 1.93,
"grad_norm": 0.10536928474903107,
"learning_rate": 0.00010616537805217916,
"loss": 0.4033,
"step": 328
},
{
"epoch": 1.94,
"grad_norm": 0.10302754491567612,
"learning_rate": 0.00010569165238810666,
"loss": 0.3841,
"step": 329
},
{
"epoch": 1.94,
"grad_norm": 0.10210216045379639,
"learning_rate": 0.00010521779854372353,
"loss": 0.4043,
"step": 330
},
{
"epoch": 1.95,
"grad_norm": 0.10437451303005219,
"learning_rate": 0.00010474382719057631,
"loss": 0.4065,
"step": 331
},
{
"epoch": 1.96,
"grad_norm": 0.10098063200712204,
"learning_rate": 0.00010426974900285784,
"loss": 0.4029,
"step": 332
},
{
"epoch": 1.96,
"grad_norm": 0.09919014573097229,
"learning_rate": 0.00010379557465716696,
"loss": 0.3902,
"step": 333
},
{
"epoch": 1.97,
"grad_norm": 0.09987551718950272,
"learning_rate": 0.00010332131483226804,
"loss": 0.3901,
"step": 334
},
{
"epoch": 1.97,
"grad_norm": 0.10001327842473984,
"learning_rate": 0.00010284698020885053,
"loss": 0.3697,
"step": 335
},
{
"epoch": 1.98,
"grad_norm": 0.11250711232423782,
"learning_rate": 0.00010237258146928848,
"loss": 0.3744,
"step": 336
},
{
"epoch": 1.98,
"eval_loss": 0.4941823482513428,
"eval_runtime": 21.4596,
"eval_samples_per_second": 46.273,
"eval_steps_per_second": 11.603,
"step": 336
}
],
"logging_steps": 1,
"max_steps": 672,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 168,
"total_flos": 1.0026380477092332e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}