{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9985141158989599,
"eval_steps": 42,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.30475762367257353,
"learning_rate": 2e-05,
"loss": 0.6274,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 1.0297596454620361,
"eval_runtime": 150.6014,
"eval_samples_per_second": 1.653,
"eval_steps_per_second": 0.83,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.2798137150913395,
"learning_rate": 4e-05,
"loss": 0.6362,
"step": 2
},
{
"epoch": 0.02,
"grad_norm": 0.3178684451319545,
"learning_rate": 6e-05,
"loss": 0.6299,
"step": 3
},
{
"epoch": 0.02,
"grad_norm": 0.33651284916847835,
"learning_rate": 8e-05,
"loss": 0.6391,
"step": 4
},
{
"epoch": 0.03,
"grad_norm": 0.23984915388648712,
"learning_rate": 0.0001,
"loss": 0.6071,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 0.20514040410017348,
"learning_rate": 0.00012,
"loss": 0.5996,
"step": 6
},
{
"epoch": 0.04,
"grad_norm": 0.1950018128286362,
"learning_rate": 0.00014,
"loss": 0.6298,
"step": 7
},
{
"epoch": 0.05,
"grad_norm": 0.14246019947393238,
"learning_rate": 0.00016,
"loss": 0.5108,
"step": 8
},
{
"epoch": 0.05,
"grad_norm": 0.15792014279750227,
"learning_rate": 0.00018,
"loss": 0.5529,
"step": 9
},
{
"epoch": 0.06,
"grad_norm": 0.1517889177511264,
"learning_rate": 0.0002,
"loss": 0.5433,
"step": 10
},
{
"epoch": 0.07,
"grad_norm": 0.1372121219437277,
"learning_rate": 0.00019998023297700658,
"loss": 0.5856,
"step": 11
},
{
"epoch": 0.07,
"grad_norm": 0.16740807394942855,
"learning_rate": 0.00019992093972273018,
"loss": 0.5546,
"step": 12
},
{
"epoch": 0.08,
"grad_norm": 0.13512320693394078,
"learning_rate": 0.00019982214367819328,
"loss": 0.6193,
"step": 13
},
{
"epoch": 0.08,
"grad_norm": 0.16169796294070152,
"learning_rate": 0.0001996838839014696,
"loss": 0.5495,
"step": 14
},
{
"epoch": 0.09,
"grad_norm": 0.16796913812281988,
"learning_rate": 0.00019950621505224273,
"loss": 0.5035,
"step": 15
},
{
"epoch": 0.1,
"grad_norm": 0.1800514764162192,
"learning_rate": 0.00019928920737019733,
"loss": 0.5083,
"step": 16
},
{
"epoch": 0.1,
"grad_norm": 0.170432124866908,
"learning_rate": 0.0001990329466472502,
"loss": 0.632,
"step": 17
},
{
"epoch": 0.11,
"grad_norm": 0.19129325489749488,
"learning_rate": 0.00019873753419363336,
"loss": 0.4813,
"step": 18
},
{
"epoch": 0.11,
"grad_norm": 0.1459357988760762,
"learning_rate": 0.00019840308679784207,
"loss": 0.4973,
"step": 19
},
{
"epoch": 0.12,
"grad_norm": 0.192594730984382,
"learning_rate": 0.00019802973668046363,
"loss": 0.5291,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 0.859025467969139,
"learning_rate": 0.0001976176314419051,
"loss": 0.5296,
"step": 21
},
{
"epoch": 0.13,
"grad_norm": 0.13366297885670222,
"learning_rate": 0.000197166934004041,
"loss": 0.4819,
"step": 22
},
{
"epoch": 0.14,
"grad_norm": 0.15698714419747645,
"learning_rate": 0.00019667782254580374,
"loss": 0.5409,
"step": 23
},
{
"epoch": 0.14,
"grad_norm": 0.10995943735837355,
"learning_rate": 0.00019615049043274205,
"loss": 0.5108,
"step": 24
},
{
"epoch": 0.15,
"grad_norm": 0.10796742192788925,
"learning_rate": 0.00019558514614057609,
"loss": 0.5215,
"step": 25
},
{
"epoch": 0.15,
"grad_norm": 0.11641740089490231,
"learning_rate": 0.00019498201317277828,
"loss": 0.5012,
"step": 26
},
{
"epoch": 0.16,
"grad_norm": 0.1120175962893241,
"learning_rate": 0.00019434132997221345,
"loss": 0.474,
"step": 27
},
{
"epoch": 0.17,
"grad_norm": 0.1218171278782483,
"learning_rate": 0.0001936633498268728,
"loss": 0.5216,
"step": 28
},
{
"epoch": 0.17,
"grad_norm": 0.11718521115928844,
"learning_rate": 0.0001929483407697387,
"loss": 0.4856,
"step": 29
},
{
"epoch": 0.18,
"grad_norm": 0.12611471038571026,
"learning_rate": 0.00019219658547282067,
"loss": 0.4823,
"step": 30
},
{
"epoch": 0.18,
"grad_norm": 0.11106871615269753,
"learning_rate": 0.00019140838113540346,
"loss": 0.4869,
"step": 31
},
{
"epoch": 0.19,
"grad_norm": 0.1416503230360699,
"learning_rate": 0.00019058403936655233,
"loss": 0.5341,
"step": 32
},
{
"epoch": 0.2,
"grad_norm": 0.10761396399791698,
"learning_rate": 0.00018972388606192125,
"loss": 0.4304,
"step": 33
},
{
"epoch": 0.2,
"grad_norm": 0.10975376180434356,
"learning_rate": 0.0001888282612749132,
"loss": 0.4646,
"step": 34
},
{
"epoch": 0.21,
"grad_norm": 0.12848879670359908,
"learning_rate": 0.00018789751908224338,
"loss": 0.4972,
"step": 35
},
{
"epoch": 0.21,
"grad_norm": 0.11904721819683833,
"learning_rate": 0.00018693202744395827,
"loss": 0.505,
"step": 36
},
{
"epoch": 0.22,
"grad_norm": 0.12249852034224981,
"learning_rate": 0.00018593216805796612,
"loss": 0.5396,
"step": 37
},
{
"epoch": 0.23,
"grad_norm": 0.12453395046646995,
"learning_rate": 0.00018489833620913642,
"loss": 0.4917,
"step": 38
},
{
"epoch": 0.23,
"grad_norm": 0.12585770374422164,
"learning_rate": 0.00018383094061302766,
"loss": 0.5079,
"step": 39
},
{
"epoch": 0.24,
"grad_norm": 0.11095271476322731,
"learning_rate": 0.00018273040325430574,
"loss": 0.4812,
"step": 40
},
{
"epoch": 0.24,
"grad_norm": 0.12968115101635422,
"learning_rate": 0.00018159715921991612,
"loss": 0.5106,
"step": 41
},
{
"epoch": 0.25,
"grad_norm": 0.10933018515590627,
"learning_rate": 0.00018043165652707649,
"loss": 0.4403,
"step": 42
},
{
"epoch": 0.25,
"eval_loss": 0.9767947196960449,
"eval_runtime": 152.0343,
"eval_samples_per_second": 1.638,
"eval_steps_per_second": 0.822,
"step": 42
},
{
"epoch": 0.26,
"grad_norm": 0.11525719626792096,
"learning_rate": 0.00017923435594615744,
"loss": 0.482,
"step": 43
},
{
"epoch": 0.26,
"grad_norm": 0.12962154411778218,
"learning_rate": 0.00017800573081852122,
"loss": 0.5452,
"step": 44
},
{
"epoch": 0.27,
"grad_norm": 0.12555700120045588,
"learning_rate": 0.0001767462668693908,
"loss": 0.5084,
"step": 45
},
{
"epoch": 0.27,
"grad_norm": 0.11427565378293324,
"learning_rate": 0.00017545646201582303,
"loss": 0.5191,
"step": 46
},
{
"epoch": 0.28,
"grad_norm": 0.10974901402857151,
"learning_rate": 0.00017413682616986185,
"loss": 0.4703,
"step": 47
},
{
"epoch": 0.29,
"grad_norm": 0.11781465084480325,
"learning_rate": 0.00017278788103694943,
"loss": 0.4548,
"step": 48
},
{
"epoch": 0.29,
"grad_norm": 0.10781807228559999,
"learning_rate": 0.000171410159909675,
"loss": 0.476,
"step": 49
},
{
"epoch": 0.3,
"grad_norm": 0.12502639462035098,
"learning_rate": 0.00017000420745694254,
"loss": 0.5084,
"step": 50
},
{
"epoch": 0.3,
"grad_norm": 0.10718920826593327,
"learning_rate": 0.00016857057950864132,
"loss": 0.5093,
"step": 51
},
{
"epoch": 0.31,
"grad_norm": 0.10040549880547282,
"learning_rate": 0.0001671098428359037,
"loss": 0.4644,
"step": 52
},
{
"epoch": 0.32,
"grad_norm": 0.11778478994740472,
"learning_rate": 0.00016562257492703757,
"loss": 0.4725,
"step": 53
},
{
"epoch": 0.32,
"grad_norm": 0.1008386031049932,
"learning_rate": 0.000164109363759222,
"loss": 0.5121,
"step": 54
},
{
"epoch": 0.33,
"grad_norm": 0.1170302528140235,
"learning_rate": 0.000162570807566056,
"loss": 0.4766,
"step": 55
},
{
"epoch": 0.33,
"grad_norm": 0.1104526773884303,
"learning_rate": 0.00016100751460105243,
"loss": 0.4886,
"step": 56
},
{
"epoch": 0.34,
"grad_norm": 0.10467920768691032,
"learning_rate": 0.00015942010289717105,
"loss": 0.4703,
"step": 57
},
{
"epoch": 0.34,
"grad_norm": 0.11551406829220555,
"learning_rate": 0.00015780920002248484,
"loss": 0.4837,
"step": 58
},
{
"epoch": 0.35,
"grad_norm": 0.11133818831887894,
"learning_rate": 0.0001561754428320771,
"loss": 0.5148,
"step": 59
},
{
"epoch": 0.36,
"grad_norm": 0.11281448423273216,
"learning_rate": 0.00015451947721626676,
"loss": 0.4561,
"step": 60
},
{
"epoch": 0.36,
"grad_norm": 0.13934126997471205,
"learning_rate": 0.00015284195784526195,
"loss": 0.5069,
"step": 61
},
{
"epoch": 0.37,
"grad_norm": 0.11851655387640142,
"learning_rate": 0.00015114354791034225,
"loss": 0.5094,
"step": 62
},
{
"epoch": 0.37,
"grad_norm": 0.12909148374566123,
"learning_rate": 0.0001494249188616723,
"loss": 0.581,
"step": 63
},
{
"epoch": 0.38,
"grad_norm": 0.11070161341925377,
"learning_rate": 0.00014768675014285062,
"loss": 0.4585,
"step": 64
},
{
"epoch": 0.39,
"grad_norm": 0.13308674882888374,
"learning_rate": 0.00014592972892229778,
"loss": 0.4974,
"step": 65
},
{
"epoch": 0.39,
"grad_norm": 0.12124588853708144,
"learning_rate": 0.0001441545498215912,
"loss": 0.4463,
"step": 66
},
{
"epoch": 0.4,
"grad_norm": 0.1183570515369953,
"learning_rate": 0.00014236191464085286,
"loss": 0.447,
"step": 67
},
{
"epoch": 0.4,
"grad_norm": 0.13520024884417237,
"learning_rate": 0.00014055253208129938,
"loss": 0.5309,
"step": 68
},
{
"epoch": 0.41,
"grad_norm": 0.12184981458813801,
"learning_rate": 0.00013872711746506413,
"loss": 0.4532,
"step": 69
},
{
"epoch": 0.42,
"grad_norm": 0.12449299540645078,
"learning_rate": 0.00013688639245240078,
"loss": 0.5198,
"step": 70
},
{
"epoch": 0.42,
"grad_norm": 0.1383134750490429,
"learning_rate": 0.00013503108475638244,
"loss": 0.5629,
"step": 71
},
{
"epoch": 0.43,
"grad_norm": 0.246237001656926,
"learning_rate": 0.0001331619278552068,
"loss": 0.4869,
"step": 72
},
{
"epoch": 0.43,
"grad_norm": 0.13337703940933632,
"learning_rate": 0.00013127966070222274,
"loss": 0.4792,
"step": 73
},
{
"epoch": 0.44,
"grad_norm": 0.12428922033806454,
"learning_rate": 0.00012938502743379212,
"loss": 0.4825,
"step": 74
},
{
"epoch": 0.45,
"grad_norm": 0.13290774912900208,
"learning_rate": 0.00012747877707510252,
"loss": 0.5138,
"step": 75
},
{
"epoch": 0.45,
"grad_norm": 0.11185975046756892,
"learning_rate": 0.0001255616632440475,
"loss": 0.4815,
"step": 76
},
{
"epoch": 0.46,
"grad_norm": 0.1130592868215497,
"learning_rate": 0.0001236344438532905,
"loss": 0.5046,
"step": 77
},
{
"epoch": 0.46,
"grad_norm": 0.12882943465594857,
"learning_rate": 0.0001216978808106318,
"loss": 0.5091,
"step": 78
},
{
"epoch": 0.47,
"grad_norm": 0.14837896297082676,
"learning_rate": 0.00011975273971779528,
"loss": 0.5158,
"step": 79
},
{
"epoch": 0.48,
"grad_norm": 0.1265223309856292,
"learning_rate": 0.00011779978956775506,
"loss": 0.5068,
"step": 80
},
{
"epoch": 0.48,
"grad_norm": 0.14042502330520407,
"learning_rate": 0.0001158398024407215,
"loss": 0.5061,
"step": 81
},
{
"epoch": 0.49,
"grad_norm": 0.1261526695491767,
"learning_rate": 0.00011387355319890685,
"loss": 0.4691,
"step": 82
},
{
"epoch": 0.49,
"grad_norm": 0.12007305451001854,
"learning_rate": 0.00011190181918019049,
"loss": 0.4753,
"step": 83
},
{
"epoch": 0.5,
"grad_norm": 0.12809956897166885,
"learning_rate": 0.00010992537989080618,
"loss": 0.4417,
"step": 84
},
{
"epoch": 0.5,
"eval_loss": 0.9675251841545105,
"eval_runtime": 152.4793,
"eval_samples_per_second": 1.633,
"eval_steps_per_second": 0.82,
"step": 84
},
{
"epoch": 0.51,
"grad_norm": 0.11858329804793687,
"learning_rate": 0.00010794501669717145,
"loss": 0.4868,
"step": 85
},
{
"epoch": 0.51,
"grad_norm": 0.10984649953887334,
"learning_rate": 0.00010596151251698199,
"loss": 0.4598,
"step": 86
},
{
"epoch": 0.52,
"grad_norm": 0.10927203986256682,
"learning_rate": 0.0001039756515096926,
"loss": 0.4693,
"step": 87
},
{
"epoch": 0.52,
"grad_norm": 0.11205046531522328,
"learning_rate": 0.00010198821876650701,
"loss": 0.4921,
"step": 88
},
{
"epoch": 0.53,
"grad_norm": 0.13232347270009215,
"learning_rate": 0.0001,
"loss": 0.4695,
"step": 89
},
{
"epoch": 0.53,
"grad_norm": 0.12136881873560385,
"learning_rate": 9.801178123349298e-05,
"loss": 0.4859,
"step": 90
},
{
"epoch": 0.54,
"grad_norm": 0.14347476421156694,
"learning_rate": 9.602434849030745e-05,
"loss": 0.4796,
"step": 91
},
{
"epoch": 0.55,
"grad_norm": 0.13956845267055204,
"learning_rate": 9.403848748301802e-05,
"loss": 0.5339,
"step": 92
},
{
"epoch": 0.55,
"grad_norm": 0.12814010903196785,
"learning_rate": 9.205498330282856e-05,
"loss": 0.5267,
"step": 93
},
{
"epoch": 0.56,
"grad_norm": 0.12798850330908082,
"learning_rate": 9.007462010919386e-05,
"loss": 0.4604,
"step": 94
},
{
"epoch": 0.56,
"grad_norm": 0.13673366056605873,
"learning_rate": 8.809818081980953e-05,
"loss": 0.49,
"step": 95
},
{
"epoch": 0.57,
"grad_norm": 0.12607483394599764,
"learning_rate": 8.612644680109319e-05,
"loss": 0.4774,
"step": 96
},
{
"epoch": 0.58,
"grad_norm": 0.1365629261848207,
"learning_rate": 8.416019755927851e-05,
"loss": 0.4827,
"step": 97
},
{
"epoch": 0.58,
"grad_norm": 0.12122559291940836,
"learning_rate": 8.2200210432245e-05,
"loss": 0.5044,
"step": 98
},
{
"epoch": 0.59,
"grad_norm": 0.11655390642565265,
"learning_rate": 8.024726028220474e-05,
"loss": 0.503,
"step": 99
},
{
"epoch": 0.59,
"grad_norm": 0.12394574502796742,
"learning_rate": 7.83021191893682e-05,
"loss": 0.491,
"step": 100
},
{
"epoch": 0.6,
"grad_norm": 0.14922841699852962,
"learning_rate": 7.636555614670953e-05,
"loss": 0.457,
"step": 101
},
{
"epoch": 0.61,
"grad_norm": 0.11076924096187928,
"learning_rate": 7.443833675595255e-05,
"loss": 0.4603,
"step": 102
},
{
"epoch": 0.61,
"grad_norm": 0.1263594611752413,
"learning_rate": 7.252122292489747e-05,
"loss": 0.4859,
"step": 103
},
{
"epoch": 0.62,
"grad_norm": 0.11432575178505003,
"learning_rate": 7.061497256620793e-05,
"loss": 0.4627,
"step": 104
},
{
"epoch": 0.62,
"grad_norm": 0.1078119391965793,
"learning_rate": 6.87203392977773e-05,
"loss": 0.4829,
"step": 105
},
{
"epoch": 0.63,
"grad_norm": 0.12752089816514908,
"learning_rate": 6.683807214479323e-05,
"loss": 0.46,
"step": 106
},
{
"epoch": 0.64,
"grad_norm": 0.11421623043902956,
"learning_rate": 6.496891524361757e-05,
"loss": 0.4429,
"step": 107
},
{
"epoch": 0.64,
"grad_norm": 0.10432253193399477,
"learning_rate": 6.311360754759923e-05,
"loss": 0.402,
"step": 108
},
{
"epoch": 0.65,
"grad_norm": 0.12155248673662734,
"learning_rate": 6.127288253493591e-05,
"loss": 0.5126,
"step": 109
},
{
"epoch": 0.65,
"grad_norm": 0.14266947863559803,
"learning_rate": 5.9447467918700614e-05,
"loss": 0.4821,
"step": 110
},
{
"epoch": 0.66,
"grad_norm": 0.14851250761112514,
"learning_rate": 5.763808535914723e-05,
"loss": 0.4891,
"step": 111
},
{
"epoch": 0.67,
"grad_norm": 0.14264023747361737,
"learning_rate": 5.584545017840885e-05,
"loss": 0.5181,
"step": 112
},
{
"epoch": 0.67,
"grad_norm": 0.12837168363458795,
"learning_rate": 5.407027107770219e-05,
"loss": 0.5599,
"step": 113
},
{
"epoch": 0.68,
"grad_norm": 0.11874709251257598,
"learning_rate": 5.2313249857149414e-05,
"loss": 0.4536,
"step": 114
},
{
"epoch": 0.68,
"grad_norm": 0.12010754957532713,
"learning_rate": 5.0575081138327715e-05,
"loss": 0.5004,
"step": 115
},
{
"epoch": 0.69,
"grad_norm": 0.13464124440677885,
"learning_rate": 4.885645208965779e-05,
"loss": 0.4985,
"step": 116
},
{
"epoch": 0.7,
"grad_norm": 0.13701854261941088,
"learning_rate": 4.715804215473809e-05,
"loss": 0.4709,
"step": 117
},
{
"epoch": 0.7,
"grad_norm": 0.1335483738873249,
"learning_rate": 4.548052278373327e-05,
"loss": 0.4735,
"step": 118
},
{
"epoch": 0.71,
"grad_norm": 0.13603172024059101,
"learning_rate": 4.382455716792291e-05,
"loss": 0.4721,
"step": 119
},
{
"epoch": 0.71,
"grad_norm": 0.13843339239058639,
"learning_rate": 4.219079997751515e-05,
"loss": 0.4954,
"step": 120
},
{
"epoch": 0.72,
"grad_norm": 0.15011169526780793,
"learning_rate": 4.0579897102828966e-05,
"loss": 0.4648,
"step": 121
},
{
"epoch": 0.73,
"grad_norm": 0.13061595453081623,
"learning_rate": 3.899248539894757e-05,
"loss": 0.4801,
"step": 122
},
{
"epoch": 0.73,
"grad_norm": 0.14067787924603412,
"learning_rate": 3.7429192433944014e-05,
"loss": 0.4805,
"step": 123
},
{
"epoch": 0.74,
"grad_norm": 0.13420057703295998,
"learning_rate": 3.589063624077802e-05,
"loss": 0.4446,
"step": 124
},
{
"epoch": 0.74,
"grad_norm": 0.14083737654873127,
"learning_rate": 3.4377425072962465e-05,
"loss": 0.46,
"step": 125
},
{
"epoch": 0.75,
"grad_norm": 0.13231889777376862,
"learning_rate": 3.289015716409631e-05,
"loss": 0.4451,
"step": 126
},
{
"epoch": 0.75,
"eval_loss": 0.9651579260826111,
"eval_runtime": 155.5959,
"eval_samples_per_second": 1.6,
"eval_steps_per_second": 0.803,
"step": 126
},
{
"epoch": 0.75,
"grad_norm": 0.1119782513388846,
"learning_rate": 3.14294204913587e-05,
"loss": 0.4728,
"step": 127
},
{
"epoch": 0.76,
"grad_norm": 0.1258610262132018,
"learning_rate": 2.9995792543057478e-05,
"loss": 0.4793,
"step": 128
},
{
"epoch": 0.77,
"grad_norm": 0.1332956040603169,
"learning_rate": 2.8589840090325027e-05,
"loss": 0.4919,
"step": 129
},
{
"epoch": 0.77,
"grad_norm": 0.13669237836174272,
"learning_rate": 2.7212118963050592e-05,
"loss": 0.4863,
"step": 130
},
{
"epoch": 0.78,
"grad_norm": 0.13718073990096244,
"learning_rate": 2.586317383013821e-05,
"loss": 0.4652,
"step": 131
},
{
"epoch": 0.78,
"grad_norm": 0.1328715725965393,
"learning_rate": 2.4543537984176978e-05,
"loss": 0.4464,
"step": 132
},
{
"epoch": 0.79,
"grad_norm": 0.12474829320490133,
"learning_rate": 2.325373313060919e-05,
"loss": 0.501,
"step": 133
},
{
"epoch": 0.8,
"grad_norm": 0.13832785688421897,
"learning_rate": 2.19942691814788e-05,
"loss": 0.5113,
"step": 134
},
{
"epoch": 0.8,
"grad_norm": 0.13446374612957607,
"learning_rate": 2.076564405384258e-05,
"loss": 0.452,
"step": 135
},
{
"epoch": 0.81,
"grad_norm": 0.15087962227561943,
"learning_rate": 1.9568343472923524e-05,
"loss": 0.5114,
"step": 136
},
{
"epoch": 0.81,
"grad_norm": 0.13471199433579678,
"learning_rate": 1.840284078008393e-05,
"loss": 0.5074,
"step": 137
},
{
"epoch": 0.82,
"grad_norm": 0.14471602871304384,
"learning_rate": 1.7269596745694295e-05,
"loss": 0.4812,
"step": 138
},
{
"epoch": 0.83,
"grad_norm": 0.12791095480012135,
"learning_rate": 1.616905938697234e-05,
"loss": 0.439,
"step": 139
},
{
"epoch": 0.83,
"grad_norm": 0.13395748535894433,
"learning_rate": 1.5101663790863596e-05,
"loss": 0.4942,
"step": 140
},
{
"epoch": 0.84,
"grad_norm": 0.1216157843375959,
"learning_rate": 1.4067831942033904e-05,
"loss": 0.4904,
"step": 141
},
{
"epoch": 0.84,
"grad_norm": 0.12513029490165878,
"learning_rate": 1.3067972556041752e-05,
"loss": 0.4583,
"step": 142
},
{
"epoch": 0.85,
"grad_norm": 0.11835922329329535,
"learning_rate": 1.210248091775663e-05,
"loss": 0.5281,
"step": 143
},
{
"epoch": 0.86,
"grad_norm": 0.14513302866752326,
"learning_rate": 1.1171738725086833e-05,
"loss": 0.4432,
"step": 144
},
{
"epoch": 0.86,
"grad_norm": 0.11934584390756656,
"learning_rate": 1.0276113938078769e-05,
"loss": 0.4664,
"step": 145
},
{
"epoch": 0.87,
"grad_norm": 0.11746932949614314,
"learning_rate": 9.415960633447674e-06,
"loss": 0.4452,
"step": 146
},
{
"epoch": 0.87,
"grad_norm": 0.1183312470782195,
"learning_rate": 8.59161886459654e-06,
"loss": 0.4858,
"step": 147
},
{
"epoch": 0.88,
"grad_norm": 0.1226342260426643,
"learning_rate": 7.803414527179343e-06,
"loss": 0.4609,
"step": 148
},
{
"epoch": 0.89,
"grad_norm": 0.12849903086517628,
"learning_rate": 7.051659230261298e-06,
"loss": 0.4992,
"step": 149
},
{
"epoch": 0.89,
"grad_norm": 0.12558798780888547,
"learning_rate": 6.336650173127223e-06,
"loss": 0.5055,
"step": 150
},
{
"epoch": 0.9,
"grad_norm": 0.11991457404362381,
"learning_rate": 5.658670027786561e-06,
"loss": 0.4711,
"step": 151
},
{
"epoch": 0.9,
"grad_norm": 0.14486189343828018,
"learning_rate": 5.017986827221733e-06,
"loss": 0.4984,
"step": 152
},
{
"epoch": 0.91,
"grad_norm": 0.14292272311516058,
"learning_rate": 4.4148538594239174e-06,
"loss": 0.518,
"step": 153
},
{
"epoch": 0.92,
"grad_norm": 0.12669351883956026,
"learning_rate": 3.849509567257959e-06,
"loss": 0.509,
"step": 154
},
{
"epoch": 0.92,
"grad_norm": 0.14745027617256945,
"learning_rate": 3.3221774541962845e-06,
"loss": 0.4655,
"step": 155
},
{
"epoch": 0.93,
"grad_norm": 0.13904171638337365,
"learning_rate": 2.8330659959589946e-06,
"loss": 0.445,
"step": 156
},
{
"epoch": 0.93,
"grad_norm": 0.12971202836694892,
"learning_rate": 2.3823685580949273e-06,
"loss": 0.4534,
"step": 157
},
{
"epoch": 0.94,
"grad_norm": 0.13437185949159883,
"learning_rate": 1.9702633195363917e-06,
"loss": 0.4785,
"step": 158
},
{
"epoch": 0.95,
"grad_norm": 0.12319276879857373,
"learning_rate": 1.5969132021579347e-06,
"loss": 0.5011,
"step": 159
},
{
"epoch": 0.95,
"grad_norm": 0.12918661435233847,
"learning_rate": 1.2624658063666639e-06,
"loss": 0.5236,
"step": 160
},
{
"epoch": 0.96,
"grad_norm": 0.12874241011432666,
"learning_rate": 9.670533527498137e-07,
"loss": 0.4439,
"step": 161
},
{
"epoch": 0.96,
"grad_norm": 0.1315361924610259,
"learning_rate": 7.10792629802659e-07,
"loss": 0.4929,
"step": 162
},
{
"epoch": 0.97,
"grad_norm": 0.1336404295007477,
"learning_rate": 4.937849477572587e-07,
"loss": 0.433,
"step": 163
},
{
"epoch": 0.97,
"grad_norm": 0.11865993758025514,
"learning_rate": 3.161160985304168e-07,
"loss": 0.4266,
"step": 164
},
{
"epoch": 0.98,
"grad_norm": 0.12559739797694514,
"learning_rate": 1.7785632180670198e-07,
"loss": 0.436,
"step": 165
},
{
"epoch": 0.99,
"grad_norm": 0.12409278477769135,
"learning_rate": 7.906027726981568e-08,
"loss": 0.5017,
"step": 166
},
{
"epoch": 0.99,
"grad_norm": 0.1398337808177678,
"learning_rate": 1.976702299344435e-08,
"loss": 0.5112,
"step": 167
},
{
"epoch": 1.0,
"grad_norm": 0.11434132222175684,
"learning_rate": 0.0,
"loss": 0.4616,
"step": 168
},
{
"epoch": 1.0,
"eval_loss": 0.9651336073875427,
"eval_runtime": 154.7455,
"eval_samples_per_second": 1.609,
"eval_steps_per_second": 0.808,
"step": 168
}
],
"logging_steps": 1,
"max_steps": 168,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"total_flos": 3.406567776272253e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}