{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.967741935483871,
  "eval_steps": 500,
  "global_step": 108,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018433179723502304,
      "grad_norm": 0.16578913731865466,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.5087,
      "step": 1
    },
    {
      "epoch": 0.03686635944700461,
      "grad_norm": 0.1722925976557175,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.4998,
      "step": 2
    },
    {
      "epoch": 0.055299539170506916,
      "grad_norm": 0.162572172422981,
      "learning_rate": 1.5e-06,
      "loss": 0.5098,
      "step": 3
    },
    {
      "epoch": 0.07373271889400922,
      "grad_norm": 0.15599834075161614,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.5042,
      "step": 4
    },
    {
      "epoch": 0.09216589861751152,
      "grad_norm": 0.12177646639247991,
      "learning_rate": 2.5e-06,
      "loss": 0.5071,
      "step": 5
    },
    {
      "epoch": 0.11059907834101383,
      "grad_norm": 0.1237088515175474,
      "learning_rate": 3e-06,
      "loss": 0.5017,
      "step": 6
    },
    {
      "epoch": 0.12903225806451613,
      "grad_norm": 0.16630602309614123,
      "learning_rate": 3.5000000000000004e-06,
      "loss": 0.5048,
      "step": 7
    },
    {
      "epoch": 0.14746543778801843,
      "grad_norm": 0.27010444722015153,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.5101,
      "step": 8
    },
    {
      "epoch": 0.16589861751152074,
      "grad_norm": 0.29479730440390195,
      "learning_rate": 4.5e-06,
      "loss": 0.5076,
      "step": 9
    },
    {
      "epoch": 0.18433179723502305,
      "grad_norm": 0.1759601385954911,
      "learning_rate": 5e-06,
      "loss": 0.5081,
      "step": 10
    },
    {
      "epoch": 0.20276497695852536,
      "grad_norm": 0.16293907501833735,
      "learning_rate": 5.500000000000001e-06,
      "loss": 0.5058,
      "step": 11
    },
    {
      "epoch": 0.22119815668202766,
      "grad_norm": 0.2104524729834747,
      "learning_rate": 6e-06,
      "loss": 0.5041,
      "step": 12
    },
    {
      "epoch": 0.23963133640552994,
      "grad_norm": 0.3439111046909263,
      "learning_rate": 6.5000000000000004e-06,
      "loss": 0.5028,
      "step": 13
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 0.1896879332677983,
      "learning_rate": 7.000000000000001e-06,
      "loss": 0.5063,
      "step": 14
    },
    {
      "epoch": 0.2764976958525346,
      "grad_norm": 0.16556412873045434,
      "learning_rate": 7.5e-06,
      "loss": 0.5008,
      "step": 15
    },
    {
      "epoch": 0.29493087557603687,
      "grad_norm": 0.18454200495588122,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.5025,
      "step": 16
    },
    {
      "epoch": 0.31336405529953915,
      "grad_norm": 0.19016194104381942,
      "learning_rate": 8.500000000000002e-06,
      "loss": 0.506,
      "step": 17
    },
    {
      "epoch": 0.3317972350230415,
      "grad_norm": 0.1756522900201089,
      "learning_rate": 9e-06,
      "loss": 0.5065,
      "step": 18
    },
    {
      "epoch": 0.35023041474654376,
      "grad_norm": 0.20347747202770788,
      "learning_rate": 9.5e-06,
      "loss": 0.5018,
      "step": 19
    },
    {
      "epoch": 0.3686635944700461,
      "grad_norm": 0.17281128792073364,
      "learning_rate": 1e-05,
      "loss": 0.4985,
      "step": 20
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": 0.16864344707112827,
      "learning_rate": 1.05e-05,
      "loss": 0.5001,
      "step": 21
    },
    {
      "epoch": 0.4055299539170507,
      "grad_norm": 0.180006594373567,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 0.4957,
      "step": 22
    },
    {
      "epoch": 0.423963133640553,
      "grad_norm": 0.22019846104588628,
      "learning_rate": 1.1500000000000002e-05,
      "loss": 0.4994,
      "step": 23
    },
    {
      "epoch": 0.4423963133640553,
      "grad_norm": 0.29450471197920014,
      "learning_rate": 1.2e-05,
      "loss": 0.4975,
      "step": 24
    },
    {
      "epoch": 0.4608294930875576,
      "grad_norm": 0.3531458750461238,
      "learning_rate": 1.25e-05,
      "loss": 0.5024,
      "step": 25
    },
    {
      "epoch": 0.4792626728110599,
      "grad_norm": 0.2704221537114728,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 0.5036,
      "step": 26
    },
    {
      "epoch": 0.4976958525345622,
      "grad_norm": 0.21807981567443038,
      "learning_rate": 1.3500000000000001e-05,
      "loss": 0.4963,
      "step": 27
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 0.2537511686453976,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.5014,
      "step": 28
    },
    {
      "epoch": 0.5345622119815668,
      "grad_norm": 0.27282609851480566,
      "learning_rate": 1.45e-05,
      "loss": 0.4952,
      "step": 29
    },
    {
      "epoch": 0.5529953917050692,
      "grad_norm": 0.1811493216272178,
      "learning_rate": 1.5e-05,
      "loss": 0.4945,
      "step": 30
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.2154826922529899,
      "learning_rate": 1.55e-05,
      "loss": 0.4927,
      "step": 31
    },
    {
      "epoch": 0.5898617511520737,
      "grad_norm": 0.27147274813275624,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.4955,
      "step": 32
    },
    {
      "epoch": 0.6082949308755761,
      "grad_norm": 0.3148285453024877,
      "learning_rate": 1.65e-05,
      "loss": 0.4944,
      "step": 33
    },
    {
      "epoch": 0.6267281105990783,
      "grad_norm": 0.34859491123867103,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 0.4905,
      "step": 34
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 0.42577058166978227,
      "learning_rate": 1.75e-05,
      "loss": 0.4913,
      "step": 35
    },
    {
      "epoch": 0.663594470046083,
      "grad_norm": 0.4904668611850979,
      "learning_rate": 1.8e-05,
      "loss": 0.496,
      "step": 36
    },
    {
      "epoch": 0.6820276497695853,
      "grad_norm": 0.45085408352277095,
      "learning_rate": 1.85e-05,
      "loss": 0.497,
      "step": 37
    },
    {
      "epoch": 0.7004608294930875,
      "grad_norm": 0.3728335955961086,
      "learning_rate": 1.9e-05,
      "loss": 0.4924,
      "step": 38
    },
    {
      "epoch": 0.7188940092165899,
      "grad_norm": 0.31712845935435935,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 0.4971,
      "step": 39
    },
    {
      "epoch": 0.7373271889400922,
      "grad_norm": 0.35626608232389995,
      "learning_rate": 2e-05,
      "loss": 0.4937,
      "step": 40
    },
    {
      "epoch": 0.7557603686635944,
      "grad_norm": 0.36587367082049105,
      "learning_rate": 2.05e-05,
      "loss": 0.4879,
      "step": 41
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 0.34644598131926585,
      "learning_rate": 2.1e-05,
      "loss": 0.4978,
      "step": 42
    },
    {
      "epoch": 0.7926267281105991,
      "grad_norm": 0.3590839478187489,
      "learning_rate": 2.15e-05,
      "loss": 0.4933,
      "step": 43
    },
    {
      "epoch": 0.8110599078341014,
      "grad_norm": 0.4208361801021048,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.4945,
      "step": 44
    },
    {
      "epoch": 0.8294930875576036,
      "grad_norm": 0.4476283919996847,
      "learning_rate": 2.25e-05,
      "loss": 0.4892,
      "step": 45
    },
    {
      "epoch": 0.847926267281106,
      "grad_norm": 0.36958185599300014,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 0.4949,
      "step": 46
    },
    {
      "epoch": 0.8663594470046083,
      "grad_norm": 0.2911820871345684,
      "learning_rate": 2.35e-05,
      "loss": 0.4909,
      "step": 47
    },
    {
      "epoch": 0.8847926267281107,
      "grad_norm": 0.30858968380534924,
      "learning_rate": 2.4e-05,
      "loss": 0.4885,
      "step": 48
    },
    {
      "epoch": 0.9032258064516129,
      "grad_norm": 0.29278185745501223,
      "learning_rate": 2.45e-05,
      "loss": 0.4924,
      "step": 49
    },
    {
      "epoch": 0.9216589861751152,
      "grad_norm": 0.2544353522559892,
      "learning_rate": 2.5e-05,
      "loss": 0.4875,
      "step": 50
    },
    {
      "epoch": 0.9400921658986175,
      "grad_norm": 0.2715960030565017,
      "learning_rate": 2.5500000000000003e-05,
      "loss": 0.4868,
      "step": 51
    },
    {
      "epoch": 0.9585253456221198,
      "grad_norm": 0.3073179286108365,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.4889,
      "step": 52
    },
    {
      "epoch": 0.9769585253456221,
      "grad_norm": 0.3371193687132362,
      "learning_rate": 2.6500000000000004e-05,
      "loss": 0.4898,
      "step": 53
    },
    {
      "epoch": 0.9953917050691244,
      "grad_norm": 0.33074577044400744,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 0.4841,
      "step": 54
    },
    {
      "epoch": 1.0138248847926268,
      "grad_norm": 0.3297515468133034,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 0.4884,
      "step": 55
    },
    {
      "epoch": 1.0092165898617511,
      "grad_norm": 0.4029640463193266,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.459,
      "step": 56
    },
    {
      "epoch": 1.0276497695852536,
      "grad_norm": 0.4940605439662155,
      "learning_rate": 2.8499999999999998e-05,
      "loss": 0.4237,
      "step": 57
    },
    {
      "epoch": 1.0460829493087558,
      "grad_norm": 0.6088906714727877,
      "learning_rate": 2.9e-05,
      "loss": 0.431,
      "step": 58
    },
    {
      "epoch": 1.064516129032258,
      "grad_norm": 0.7906992222694273,
      "learning_rate": 2.95e-05,
      "loss": 0.4386,
      "step": 59
    },
    {
      "epoch": 1.0829493087557605,
      "grad_norm": 0.8776764408330847,
      "learning_rate": 3e-05,
      "loss": 0.4377,
      "step": 60
    },
    {
      "epoch": 1.1013824884792627,
      "grad_norm": 0.8163334281126426,
      "learning_rate": 3.05e-05,
      "loss": 0.4356,
      "step": 61
    },
    {
      "epoch": 1.119815668202765,
      "grad_norm": 0.8591188461532855,
      "learning_rate": 3.1e-05,
      "loss": 0.4343,
      "step": 62
    },
    {
      "epoch": 1.1382488479262673,
      "grad_norm": 0.7423520884399006,
      "learning_rate": 3.15e-05,
      "loss": 0.4396,
      "step": 63
    },
    {
      "epoch": 1.1566820276497696,
      "grad_norm": 0.6559077625993845,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.4393,
      "step": 64
    },
    {
      "epoch": 1.1751152073732718,
      "grad_norm": 0.5192220132180586,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.4268,
      "step": 65
    },
    {
      "epoch": 1.1935483870967742,
      "grad_norm": 0.4721316464238954,
      "learning_rate": 3.3e-05,
      "loss": 0.4299,
      "step": 66
    },
    {
      "epoch": 1.2119815668202765,
      "grad_norm": 0.37162261772983884,
      "learning_rate": 3.35e-05,
      "loss": 0.428,
      "step": 67
    },
    {
      "epoch": 1.230414746543779,
      "grad_norm": 0.390597086956948,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.4242,
      "step": 68
    },
    {
      "epoch": 1.2488479262672811,
      "grad_norm": 0.3667364763504883,
      "learning_rate": 3.45e-05,
      "loss": 0.4308,
      "step": 69
    },
    {
      "epoch": 1.2672811059907834,
      "grad_norm": 0.4413076637014959,
      "learning_rate": 3.5e-05,
      "loss": 0.4175,
      "step": 70
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.3828593150648835,
      "learning_rate": 3.55e-05,
      "loss": 0.4255,
      "step": 71
    },
    {
      "epoch": 1.304147465437788,
      "grad_norm": 0.3422241428662135,
      "learning_rate": 3.6e-05,
      "loss": 0.4206,
      "step": 72
    },
    {
      "epoch": 1.3225806451612903,
      "grad_norm": 0.419503312255047,
      "learning_rate": 3.65e-05,
      "loss": 0.4216,
      "step": 73
    },
    {
      "epoch": 1.3410138248847927,
      "grad_norm": 0.39398530533795917,
      "learning_rate": 3.7e-05,
      "loss": 0.4186,
      "step": 74
    },
    {
      "epoch": 1.359447004608295,
      "grad_norm": 0.31275590078306537,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.4192,
      "step": 75
    },
    {
      "epoch": 1.3778801843317972,
      "grad_norm": 0.283726761659876,
      "learning_rate": 3.8e-05,
      "loss": 0.4134,
      "step": 76
    },
    {
      "epoch": 1.3963133640552996,
      "grad_norm": 0.2795686696921213,
      "learning_rate": 3.85e-05,
      "loss": 0.4201,
      "step": 77
    },
    {
      "epoch": 1.4147465437788018,
      "grad_norm": 0.2573708572632348,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 0.4174,
      "step": 78
    },
    {
      "epoch": 1.4331797235023043,
      "grad_norm": 0.26179179902269994,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 0.4186,
      "step": 79
    },
    {
      "epoch": 1.4516129032258065,
      "grad_norm": 0.25383918207396766,
      "learning_rate": 4e-05,
      "loss": 0.4115,
      "step": 80
    },
    {
      "epoch": 1.4700460829493087,
      "grad_norm": 0.26752441715146946,
      "learning_rate": 4.05e-05,
      "loss": 0.4164,
      "step": 81
    },
    {
      "epoch": 1.488479262672811,
      "grad_norm": 0.3298159183943398,
      "learning_rate": 4.1e-05,
      "loss": 0.4199,
      "step": 82
    },
    {
      "epoch": 1.5069124423963134,
      "grad_norm": 0.38660808750412323,
      "learning_rate": 4.15e-05,
      "loss": 0.417,
      "step": 83
    },
    {
      "epoch": 1.5253456221198156,
      "grad_norm": 0.485211204279941,
      "learning_rate": 4.2e-05,
      "loss": 0.4193,
      "step": 84
    },
    {
      "epoch": 1.543778801843318,
      "grad_norm": 0.6255151017178746,
      "learning_rate": 4.25e-05,
      "loss": 0.4264,
      "step": 85
    },
    {
      "epoch": 1.5622119815668203,
      "grad_norm": 0.7700118743780004,
      "learning_rate": 4.3e-05,
      "loss": 0.4287,
      "step": 86
    },
    {
      "epoch": 1.5806451612903225,
      "grad_norm": 0.6615400048001224,
      "learning_rate": 4.35e-05,
      "loss": 0.4277,
      "step": 87
    },
    {
      "epoch": 1.5990783410138247,
      "grad_norm": 0.4437937981053966,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.4232,
      "step": 88
    },
    {
      "epoch": 1.6175115207373272,
      "grad_norm": 0.567061608533075,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 0.4258,
      "step": 89
    },
    {
      "epoch": 1.6359447004608296,
      "grad_norm": 0.4918052804723442,
      "learning_rate": 4.5e-05,
      "loss": 0.4228,
      "step": 90
    },
    {
      "epoch": 1.6543778801843319,
      "grad_norm": 0.496826066236439,
      "learning_rate": 4.55e-05,
      "loss": 0.4229,
      "step": 91
    },
    {
      "epoch": 1.672811059907834,
      "grad_norm": 0.4731592011924632,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.4215,
      "step": 92
    },
    {
      "epoch": 1.6912442396313363,
      "grad_norm": 0.36878471669976337,
      "learning_rate": 4.6500000000000005e-05,
      "loss": 0.4193,
      "step": 93
    },
    {
      "epoch": 1.7096774193548387,
      "grad_norm": 0.42898764923079724,
      "learning_rate": 4.7e-05,
      "loss": 0.416,
      "step": 94
    },
    {
      "epoch": 1.728110599078341,
      "grad_norm": 0.3959738727253371,
      "learning_rate": 4.75e-05,
      "loss": 0.4186,
      "step": 95
    },
    {
      "epoch": 1.7465437788018434,
      "grad_norm": 0.3284621544244543,
      "learning_rate": 4.8e-05,
      "loss": 0.4159,
      "step": 96
    },
    {
      "epoch": 1.7649769585253456,
      "grad_norm": 0.3687303178188624,
      "learning_rate": 4.85e-05,
      "loss": 0.4177,
      "step": 97
    },
    {
      "epoch": 1.7834101382488479,
      "grad_norm": 0.36139089440724775,
      "learning_rate": 4.9e-05,
      "loss": 0.4198,
      "step": 98
    },
    {
      "epoch": 1.80184331797235,
      "grad_norm": 0.4663478454832621,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 0.4264,
      "step": 99
    },
    {
      "epoch": 1.8202764976958525,
      "grad_norm": 0.5056372967287713,
      "learning_rate": 5e-05,
      "loss": 0.4217,
      "step": 100
    },
    {
      "epoch": 1.838709677419355,
      "grad_norm": 0.5324522359135441,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.4216,
      "step": 101
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.4621746495598758,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.4225,
      "step": 102
    },
    {
      "epoch": 1.8755760368663594,
      "grad_norm": 0.3415822814753156,
      "learning_rate": 3.456708580912725e-05,
      "loss": 0.4187,
      "step": 103
    },
    {
      "epoch": 1.8940092165898617,
      "grad_norm": 0.30176145892972905,
      "learning_rate": 2.5e-05,
      "loss": 0.4175,
      "step": 104
    },
    {
      "epoch": 1.912442396313364,
      "grad_norm": 0.2511920801182371,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 0.4164,
      "step": 105
    },
    {
      "epoch": 1.9308755760368663,
      "grad_norm": 0.21487586752475282,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.4144,
      "step": 106
    },
    {
      "epoch": 1.9493087557603688,
      "grad_norm": 0.1707899105775025,
      "learning_rate": 1.9030116872178316e-06,
      "loss": 0.4152,
      "step": 107
    },
    {
      "epoch": 1.967741935483871,
      "grad_norm": 0.16592415069318403,
      "learning_rate": 0.0,
      "loss": 0.4088,
      "step": 108
    }
  ],
  "logging_steps": 1,
  "max_steps": 108,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 6,
  "total_flos": 1.1787401428992e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}