{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9458749343142406,
  "eval_steps": 500,
  "global_step": 4500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2.4763257575757574e-05,
      "loss": 4.136,
      "step": 50
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.450021043771044e-05,
      "loss": 3.8037,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.42371632996633e-05,
      "loss": 3.7217,
      "step": 150
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.3974116161616165e-05,
      "loss": 3.7068,
      "step": 200
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.3711069023569023e-05,
      "loss": 3.6665,
      "step": 250
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.3448021885521888e-05,
      "loss": 3.6629,
      "step": 300
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.318497474747475e-05,
      "loss": 3.6539,
      "step": 350
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.292192760942761e-05,
      "loss": 3.6554,
      "step": 400
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.2658880471380472e-05,
      "loss": 3.6323,
      "step": 450
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.2395833333333337e-05,
      "loss": 3.6276,
      "step": 500
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.2132786195286195e-05,
      "loss": 3.5915,
      "step": 550
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.186973905723906e-05,
      "loss": 3.6035,
      "step": 600
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.160669191919192e-05,
      "loss": 3.5998,
      "step": 650
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.1343644781144782e-05,
      "loss": 3.5809,
      "step": 700
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.1080597643097644e-05,
      "loss": 3.5888,
      "step": 750
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.0817550505050505e-05,
      "loss": 3.6076,
      "step": 800
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.055450336700337e-05,
      "loss": 3.5806,
      "step": 850
    },
    {
      "epoch": 0.19,
      "learning_rate": 2.0291456228956228e-05,
      "loss": 3.5683,
      "step": 900
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.0028409090909093e-05,
      "loss": 3.5725,
      "step": 950
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.9765361952861954e-05,
      "loss": 3.57,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.9502314814814815e-05,
      "loss": 3.5478,
      "step": 1050
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.9239267676767677e-05,
      "loss": 3.5495,
      "step": 1100
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.897622053872054e-05,
      "loss": 3.5462,
      "step": 1150
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.87131734006734e-05,
      "loss": 3.5513,
      "step": 1200
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.8450126262626264e-05,
      "loss": 3.5378,
      "step": 1250
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.8187079124579126e-05,
      "loss": 3.5639,
      "step": 1300
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.7924031986531987e-05,
      "loss": 3.5629,
      "step": 1350
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.766098484848485e-05,
      "loss": 3.5553,
      "step": 1400
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.7397937710437713e-05,
      "loss": 3.504,
      "step": 1450
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.7134890572390575e-05,
      "loss": 3.5303,
      "step": 1500
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.6871843434343436e-05,
      "loss": 3.5099,
      "step": 1550
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.6608796296296297e-05,
      "loss": 3.5221,
      "step": 1600
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.634574915824916e-05,
      "loss": 3.5422,
      "step": 1650
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.608270202020202e-05,
      "loss": 3.5353,
      "step": 1700
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.581965488215488e-05,
      "loss": 3.5026,
      "step": 1750
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.5556607744107746e-05,
      "loss": 3.5182,
      "step": 1800
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.5293560606060604e-05,
      "loss": 3.4875,
      "step": 1850
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.503051346801347e-05,
      "loss": 3.4945,
      "step": 1900
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.476746632996633e-05,
      "loss": 3.4714,
      "step": 1950
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.4504419191919192e-05,
      "loss": 3.5158,
      "step": 2000
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.4241372053872053e-05,
      "loss": 3.5165,
      "step": 2050
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.3978324915824916e-05,
      "loss": 3.5131,
      "step": 2100
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.371527777777778e-05,
      "loss": 3.5227,
      "step": 2150
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.345223063973064e-05,
      "loss": 3.5369,
      "step": 2200
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.3189183501683502e-05,
      "loss": 3.5149,
      "step": 2250
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.2926136363636365e-05,
      "loss": 3.4936,
      "step": 2300
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.2663089225589225e-05,
      "loss": 3.4927,
      "step": 2350
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.2400042087542088e-05,
      "loss": 3.5001,
      "step": 2400
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.213699494949495e-05,
      "loss": 3.5218,
      "step": 2450
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.1873947811447813e-05,
      "loss": 3.4894,
      "step": 2500
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.1610900673400674e-05,
      "loss": 3.4919,
      "step": 2550
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.1347853535353537e-05,
      "loss": 3.4681,
      "step": 2600
    },
    {
      "epoch": 0.56,
      "learning_rate": 1.1084806397306398e-05,
      "loss": 3.4746,
      "step": 2650
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.082175925925926e-05,
      "loss": 3.4908,
      "step": 2700
    },
    {
      "epoch": 0.58,
      "learning_rate": 1.0558712121212121e-05,
      "loss": 3.5006,
      "step": 2750
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.0295664983164983e-05,
      "loss": 3.4835,
      "step": 2800
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.0032617845117846e-05,
      "loss": 3.4842,
      "step": 2850
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.769570707070707e-06,
      "loss": 3.4779,
      "step": 2900
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.506523569023568e-06,
      "loss": 3.4768,
      "step": 2950
    },
    {
      "epoch": 0.63,
      "learning_rate": 9.243476430976432e-06,
      "loss": 3.4755,
      "step": 3000
    },
    {
      "epoch": 0.64,
      "learning_rate": 8.980429292929293e-06,
      "loss": 3.4849,
      "step": 3050
    },
    {
      "epoch": 0.65,
      "learning_rate": 8.717382154882154e-06,
      "loss": 3.481,
      "step": 3100
    },
    {
      "epoch": 0.66,
      "learning_rate": 8.454335016835017e-06,
      "loss": 3.4918,
      "step": 3150
    },
    {
      "epoch": 0.67,
      "learning_rate": 8.191287878787879e-06,
      "loss": 3.4624,
      "step": 3200
    },
    {
      "epoch": 0.68,
      "learning_rate": 7.928240740740742e-06,
      "loss": 3.4826,
      "step": 3250
    },
    {
      "epoch": 0.69,
      "learning_rate": 7.665193602693603e-06,
      "loss": 3.4719,
      "step": 3300
    },
    {
      "epoch": 0.7,
      "learning_rate": 7.402146464646465e-06,
      "loss": 3.4858,
      "step": 3350
    },
    {
      "epoch": 0.71,
      "learning_rate": 7.139099326599327e-06,
      "loss": 3.4616,
      "step": 3400
    },
    {
      "epoch": 0.73,
      "learning_rate": 6.876052188552188e-06,
      "loss": 3.4697,
      "step": 3450
    },
    {
      "epoch": 0.74,
      "learning_rate": 6.613005050505051e-06,
      "loss": 3.4486,
      "step": 3500
    },
    {
      "epoch": 0.75,
      "learning_rate": 6.349957912457913e-06,
      "loss": 3.4718,
      "step": 3550
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.086910774410775e-06,
      "loss": 3.4443,
      "step": 3600
    },
    {
      "epoch": 0.77,
      "learning_rate": 5.823863636363636e-06,
      "loss": 3.4849,
      "step": 3650
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.560816498316499e-06,
      "loss": 3.4729,
      "step": 3700
    },
    {
      "epoch": 0.79,
      "learning_rate": 5.297769360269361e-06,
      "loss": 3.4664,
      "step": 3750
    },
    {
      "epoch": 0.8,
      "learning_rate": 5.034722222222222e-06,
      "loss": 3.4643,
      "step": 3800
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.7716750841750845e-06,
      "loss": 3.4514,
      "step": 3850
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.508627946127946e-06,
      "loss": 3.4661,
      "step": 3900
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.245580808080808e-06,
      "loss": 3.4637,
      "step": 3950
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.98253367003367e-06,
      "loss": 3.453,
      "step": 4000
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.7194865319865326e-06,
      "loss": 3.4671,
      "step": 4050
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.456439393939394e-06,
      "loss": 3.4439,
      "step": 4100
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.1933922558922558e-06,
      "loss": 3.4504,
      "step": 4150
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.930345117845118e-06,
      "loss": 3.4759,
      "step": 4200
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.66729797979798e-06,
      "loss": 3.4356,
      "step": 4250
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.4042508417508416e-06,
      "loss": 3.4618,
      "step": 4300
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.141203703703704e-06,
      "loss": 3.463,
      "step": 4350
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.8781565656565657e-06,
      "loss": 3.4524,
      "step": 4400
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.6151094276094277e-06,
      "loss": 3.4321,
      "step": 4450
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3520622895622895e-06,
      "loss": 3.4518,
      "step": 4500
    }
  ],
  "logging_steps": 50,
  "max_steps": 4757,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.104838456586335e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}