|
{
  "best_metric": 0.16962459683418274,
  "best_model_checkpoint": "output/eminem/checkpoint-459",
  "epoch": 1.0,
  "global_step": 459,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0001295937875943477,
      "loss": 0.4175,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001306389012238537,
      "loss": 0.4452,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00013161040580202325,
      "loss": 0.4523,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00013250714864031736,
      "loss": 0.4272,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00013332806575487712,
      "loss": 0.4701,
      "step": 25
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00013407218312893365,
      "loss": 0.4872,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00013473861786848294,
      "loss": 0.4231,
      "step": 35
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00013532657924983333,
      "loss": 0.4708,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001358353696578007,
      "loss": 0.5047,
      "step": 45
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013626438541342652,
      "loss": 0.4957,
      "step": 50
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00013661311749024328,
      "loss": 0.4333,
      "step": 55
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001368811521182315,
      "loss": 0.4417,
      "step": 60
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00013706817127475857,
      "loss": 0.4644,
      "step": 65
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00013717395306191163,
      "loss": 0.5235,
      "step": 70
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00013719837196977938,
      "loss": 0.4143,
      "step": 75
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00013714139902536895,
      "loss": 0.4418,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00013700310182698214,
      "loss": 0.4862,
      "step": 85
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001367836444640114,
      "loss": 0.5152,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00013648328732224639,
      "loss": 0.4401,
      "step": 95
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00013610238677492728,
      "loss": 0.4883,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00013564139475990883,
      "loss": 0.475,
      "step": 105
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001351008582434381,
      "loss": 0.4708,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00013448141857117668,
      "loss": 0.5114,
      "step": 115
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.000133783810707247,
      "loss": 0.4598,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00013300886236219912,
      "loss": 0.5016,
      "step": 125
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00013215749301093531,
      "loss": 0.5246,
      "step": 130
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001312307128017492,
      "loss": 0.4599,
      "step": 135
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013022962135779,
      "loss": 0.5193,
      "step": 140
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001291554064723639,
      "loss": 0.4855,
      "step": 145
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00012800934269961218,
      "loss": 0.4923,
      "step": 150
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00012679278984226595,
      "loss": 0.5141,
      "step": 155
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00012550719133822919,
      "loss": 0.4847,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001241540725479539,
      "loss": 0.4419,
      "step": 165
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00012273503894459195,
      "loss": 0.5324,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00012125177420911749,
      "loss": 0.4099,
      "step": 175
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00011970603823262598,
      "loss": 0.4894,
      "step": 180
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00011809966502824082,
      "loss": 0.5617,
      "step": 185
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00011643456055504982,
      "loss": 0.5006,
      "step": 190
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00011471270045669035,
      "loss": 0.4947,
      "step": 195
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00011293612771726151,
      "loss": 0.5112,
      "step": 200
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00011110695023730843,
      "loss": 0.4745,
      "step": 205
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00010922733833281926,
      "loss": 0.4961,
      "step": 210
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0001072995221601338,
      "loss": 0.5159,
      "step": 215
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010532578906988555,
      "loss": 0.4521,
      "step": 220
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010330848089304184,
      "loss": 0.4683,
      "step": 225
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010124999116234466,
      "loss": 0.4694,
      "step": 230
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.915276227237154e-05,
      "loss": 0.4838,
      "step": 235
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.701928258165896e-05,
      "loss": 0.4934,
      "step": 240
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.485208346024501e-05,
      "loss": 0.4964,
      "step": 245
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.265373628622407e-05,
      "loss": 0.478,
      "step": 250
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.04268493947969e-05,
      "loss": 0.4836,
      "step": 255
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.817406498348864e-05,
      "loss": 0.4783,
      "step": 260
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.589805597719735e-05,
      "loss": 0.5033,
      "step": 265
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.360152285675815e-05,
      "loss": 0.4933,
      "step": 270
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.128719045483102e-05,
      "loss": 0.4802,
      "step": 275
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.895780472289125e-05,
      "loss": 0.4608,
      "step": 280
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.661612947317637e-05,
      "loss": 0.451,
      "step": 285
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.426494309940237e-05,
      "loss": 0.452,
      "step": 290
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.190703528022759e-05,
      "loss": 0.4496,
      "step": 295
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.95452036692842e-05,
      "loss": 0.4758,
      "step": 300
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.718225057579034e-05,
      "loss": 0.4928,
      "step": 305
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.48209796395876e-05,
      "loss": 0.5023,
      "step": 310
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.246419250465058e-05,
      "loss": 0.426,
      "step": 315
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.011468549492541e-05,
      "loss": 0.4651,
      "step": 320
    },
    {
      "epoch": 0.71,
      "learning_rate": 5.777524629650007e-05,
      "loss": 0.5082,
      "step": 325
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.544865065003111e-05,
      "loss": 0.4546,
      "step": 330
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.313765905731657e-05,
      "loss": 0.4512,
      "step": 335
    },
    {
      "epoch": 0.75,
      "learning_rate": 5.084501350596927e-05,
      "loss": 0.4794,
      "step": 340
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.857343421605311e-05,
      "loss": 0.4781,
      "step": 345
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.63256164125579e-05,
      "loss": 0.5233,
      "step": 350
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.410422712750424e-05,
      "loss": 0.4695,
      "step": 355
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.191190203551854e-05,
      "loss": 0.4788,
      "step": 360
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.975124232661141e-05,
      "loss": 0.4318,
      "step": 365
    },
    {
      "epoch": 0.81,
      "learning_rate": 3.762481161987185e-05,
      "loss": 0.4609,
      "step": 370
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.553513292174085e-05,
      "loss": 0.4854,
      "step": 375
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.348468563245461e-05,
      "loss": 0.4337,
      "step": 380
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.1475902604251e-05,
      "loss": 0.4707,
      "step": 385
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.951116725479596e-05,
      "loss": 0.4394,
      "step": 390
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.7592810739257415e-05,
      "loss": 0.5088,
      "step": 395
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.572310918439686e-05,
      "loss": 0.4753,
      "step": 400
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.3904280987944108e-05,
      "loss": 0.4626,
      "step": 405
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.2138484186474054e-05,
      "loss": 0.4473,
      "step": 410
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.0427813894908452e-05,
      "loss": 0.4662,
      "step": 415
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.877429982065378e-05,
      "loss": 0.4383,
      "step": 420
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.7179903855360063e-05,
      "loss": 0.4584,
      "step": 425
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.564651774714127e-05,
      "loss": 0.4932,
      "step": 430
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.4175960856020567e-05,
      "loss": 0.4168,
      "step": 435
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.2769977995264743e-05,
      "loss": 0.5093,
      "step": 440
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.1430237361156786e-05,
      "loss": 0.486,
      "step": 445
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.0158328553691274e-05,
      "loss": 0.4456,
      "step": 450
    },
    {
      "epoch": 1.0,
      "learning_rate": 8.95576069051646e-06,
      "loss": 0.4546,
      "step": 455
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.200975701212883,
      "eval_runtime": 14.7931,
      "eval_samples_per_second": 42.993,
      "eval_steps_per_second": 5.408,
      "step": 456
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.16962459683418274,
      "eval_runtime": 12.708,
      "eval_samples_per_second": 47.687,
      "eval_steps_per_second": 5.98,
      "step": 459
    }
  ],
  "max_steps": 918,
  "num_train_epochs": 2,
  "total_flos": 478817648640000.0,
  "trial_name": null,
  "trial_params": null
}
|
|