{
  "best_metric": 1.0269454717636108,
  "best_model_checkpoint": "/kaggle/output/checkpoint-33000",
  "epoch": 1.3445241199478488,
  "eval_steps": 1000,
  "global_step": 33000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.7777777777777777e-11,
      "loss": 1.029,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.7638888888888893e-08,
      "loss": 1.181,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "eval_accuracy": 0.3273453093812375,
      "eval_loss": 1.1529844999313354,
      "eval_runtime": 54.2837,
      "eval_samples_per_second": 92.293,
      "eval_steps_per_second": 11.55,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 5.541666666666667e-08,
      "loss": 1.1527,
      "step": 2000
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.33013972055888224,
      "eval_loss": 1.1351025104522705,
      "eval_runtime": 54.0518,
      "eval_samples_per_second": 92.689,
      "eval_steps_per_second": 11.6,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.316666666666666e-08,
      "loss": 1.142,
      "step": 3000
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.3317365269461078,
      "eval_loss": 1.127414345741272,
      "eval_runtime": 54.0871,
      "eval_samples_per_second": 92.628,
      "eval_steps_per_second": 11.592,
      "step": 3000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.1091666666666668e-07,
      "loss": 1.1371,
      "step": 4000
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.331936127744511,
      "eval_loss": 1.1194497346878052,
      "eval_runtime": 54.3907,
      "eval_samples_per_second": 92.111,
      "eval_steps_per_second": 11.528,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.3869444444444447e-07,
      "loss": 1.1246,
      "step": 5000
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.3345309381237525,
      "eval_loss": 1.1131775379180908,
      "eval_runtime": 54.3492,
      "eval_samples_per_second": 92.182,
      "eval_steps_per_second": 11.537,
      "step": 5000
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.664722222222222e-07,
      "loss": 1.1201,
      "step": 6000
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.34151696606786425,
      "eval_loss": 1.1093512773513794,
      "eval_runtime": 54.3536,
      "eval_samples_per_second": 92.174,
      "eval_steps_per_second": 11.536,
      "step": 6000
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9425e-07,
      "loss": 1.115,
      "step": 7000
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.34510978043912177,
      "eval_loss": 1.1066845655441284,
      "eval_runtime": 53.9977,
      "eval_samples_per_second": 92.782,
      "eval_steps_per_second": 11.612,
      "step": 7000
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.2202777777777778e-07,
      "loss": 1.1125,
      "step": 8000
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.3449101796407186,
      "eval_loss": 1.1031057834625244,
      "eval_runtime": 54.2562,
      "eval_samples_per_second": 92.34,
      "eval_steps_per_second": 11.556,
      "step": 8000
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.498055555555556e-07,
      "loss": 1.1072,
      "step": 9000
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.34870259481037924,
      "eval_loss": 1.1044224500656128,
      "eval_runtime": 54.0795,
      "eval_samples_per_second": 92.641,
      "eval_steps_per_second": 11.594,
      "step": 9000
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.7755555555555554e-07,
      "loss": 1.1064,
      "step": 10000
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.3499001996007984,
      "eval_loss": 1.1001031398773193,
      "eval_runtime": 54.0869,
      "eval_samples_per_second": 92.629,
      "eval_steps_per_second": 11.592,
      "step": 10000
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.053333333333333e-07,
      "loss": 1.1053,
      "step": 11000
    },
    {
      "epoch": 0.45,
      "eval_accuracy": 0.36087824351297404,
      "eval_loss": 1.0987293720245361,
      "eval_runtime": 53.9688,
      "eval_samples_per_second": 92.831,
      "eval_steps_per_second": 11.618,
      "step": 11000
    },
    {
      "epoch": 0.49,
      "learning_rate": 3.3311111111111114e-07,
      "loss": 1.104,
      "step": 12000
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.3594810379241517,
      "eval_loss": 1.0968631505966187,
      "eval_runtime": 54.0936,
      "eval_samples_per_second": 92.617,
      "eval_steps_per_second": 11.591,
      "step": 12000
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.608888888888889e-07,
      "loss": 1.1022,
      "step": 13000
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.36087824351297404,
      "eval_loss": 1.0962979793548584,
      "eval_runtime": 54.0403,
      "eval_samples_per_second": 92.709,
      "eval_steps_per_second": 11.602,
      "step": 13000
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.886666666666667e-07,
      "loss": 1.1029,
      "step": 14000
    },
    {
      "epoch": 0.57,
      "eval_accuracy": 0.3528942115768463,
      "eval_loss": 1.0987964868545532,
      "eval_runtime": 54.06,
      "eval_samples_per_second": 92.675,
      "eval_steps_per_second": 11.598,
      "step": 14000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.163611111111111e-07,
      "loss": 1.1011,
      "step": 15000
    },
    {
      "epoch": 0.61,
      "eval_accuracy": 0.36926147704590817,
      "eval_loss": 1.0941977500915527,
      "eval_runtime": 54.0807,
      "eval_samples_per_second": 92.639,
      "eval_steps_per_second": 11.594,
      "step": 15000
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.441388888888889e-07,
      "loss": 1.1004,
      "step": 16000
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.36866267465069863,
      "eval_loss": 1.093828558921814,
      "eval_runtime": 54.2208,
      "eval_samples_per_second": 92.4,
      "eval_steps_per_second": 11.564,
      "step": 16000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.719166666666667e-07,
      "loss": 1.0994,
      "step": 17000
    },
    {
      "epoch": 0.69,
      "eval_accuracy": 0.36726546906187624,
      "eval_loss": 1.093470811843872,
      "eval_runtime": 54.2584,
      "eval_samples_per_second": 92.336,
      "eval_steps_per_second": 11.556,
      "step": 17000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.996944444444445e-07,
      "loss": 1.0993,
      "step": 18000
    },
    {
      "epoch": 0.73,
      "eval_accuracy": 0.36327345309381237,
      "eval_loss": 1.0935499668121338,
      "eval_runtime": 54.0554,
      "eval_samples_per_second": 92.683,
      "eval_steps_per_second": 11.599,
      "step": 18000
    },
    {
      "epoch": 0.77,
      "learning_rate": 5.274722222222222e-07,
      "loss": 1.0981,
      "step": 19000
    },
    {
      "epoch": 0.77,
      "eval_accuracy": 0.38682634730538923,
      "eval_loss": 1.089295506477356,
      "eval_runtime": 54.0761,
      "eval_samples_per_second": 92.647,
      "eval_steps_per_second": 11.595,
      "step": 19000
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.5525e-07,
      "loss": 1.0979,
      "step": 20000
    },
    {
      "epoch": 0.81,
      "eval_accuracy": 0.4167664670658683,
      "eval_loss": 1.084443211555481,
      "eval_runtime": 54.0394,
      "eval_samples_per_second": 92.71,
      "eval_steps_per_second": 11.603,
      "step": 20000
    },
    {
      "epoch": 0.86,
      "learning_rate": 5.830000000000001e-07,
      "loss": 1.0946,
      "step": 21000
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.4285429141716567,
      "eval_loss": 1.073406457901001,
      "eval_runtime": 54.1561,
      "eval_samples_per_second": 92.51,
      "eval_steps_per_second": 11.578,
      "step": 21000
    },
    {
      "epoch": 0.9,
      "learning_rate": 6.107777777777778e-07,
      "loss": 1.0882,
      "step": 22000
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.4413173652694611,
      "eval_loss": 1.0576634407043457,
      "eval_runtime": 53.9715,
      "eval_samples_per_second": 92.827,
      "eval_steps_per_second": 11.617,
      "step": 22000
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.385277777777778e-07,
      "loss": 1.0811,
      "step": 23000
    },
    {
      "epoch": 0.94,
      "eval_accuracy": 0.4377245508982036,
      "eval_loss": 1.063384771347046,
      "eval_runtime": 54.4655,
      "eval_samples_per_second": 91.985,
      "eval_steps_per_second": 11.512,
      "step": 23000
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.662777777777778e-07,
      "loss": 1.0773,
      "step": 24000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.4347305389221557,
      "eval_loss": 1.065600037574768,
      "eval_runtime": 54.0545,
      "eval_samples_per_second": 92.684,
      "eval_steps_per_second": 11.599,
      "step": 24000
    },
    {
      "epoch": 1.02,
      "learning_rate": 6.940555555555557e-07,
      "loss": 1.0749,
      "step": 25000
    },
    {
      "epoch": 1.02,
      "eval_accuracy": 0.4499001996007984,
      "eval_loss": 1.0491468906402588,
      "eval_runtime": 53.9397,
      "eval_samples_per_second": 92.882,
      "eval_steps_per_second": 11.624,
      "step": 25000
    },
    {
      "epoch": 1.06,
      "learning_rate": 7.218055555555556e-07,
      "loss": 1.0687,
      "step": 26000
    },
    {
      "epoch": 1.06,
      "eval_accuracy": 0.44471057884231535,
      "eval_loss": 1.0531450510025024,
      "eval_runtime": 53.996,
      "eval_samples_per_second": 92.785,
      "eval_steps_per_second": 11.612,
      "step": 26000
    },
    {
      "epoch": 1.1,
      "learning_rate": 7.495833333333334e-07,
      "loss": 1.07,
      "step": 27000
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.4475049900199601,
      "eval_loss": 1.0514291524887085,
      "eval_runtime": 54.0862,
      "eval_samples_per_second": 92.63,
      "eval_steps_per_second": 11.593,
      "step": 27000
    },
    {
      "epoch": 1.14,
      "learning_rate": 7.773611111111112e-07,
      "loss": 1.0619,
      "step": 28000
    },
    {
      "epoch": 1.14,
      "eval_accuracy": 0.4483033932135729,
      "eval_loss": 1.0422637462615967,
      "eval_runtime": 54.3061,
      "eval_samples_per_second": 92.255,
      "eval_steps_per_second": 11.546,
      "step": 28000
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.051388888888889e-07,
      "loss": 1.0586,
      "step": 29000
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.4544910179640719,
      "eval_loss": 1.0364656448364258,
      "eval_runtime": 54.1221,
      "eval_samples_per_second": 92.568,
      "eval_steps_per_second": 11.585,
      "step": 29000
    },
    {
      "epoch": 1.22,
      "learning_rate": 8.32888888888889e-07,
      "loss": 1.0571,
      "step": 30000
    },
    {
      "epoch": 1.22,
      "eval_accuracy": 0.4594810379241517,
      "eval_loss": 1.0300606489181519,
      "eval_runtime": 54.1093,
      "eval_samples_per_second": 92.59,
      "eval_steps_per_second": 11.588,
      "step": 30000
    },
    {
      "epoch": 1.26,
      "learning_rate": 8.606666666666668e-07,
      "loss": 1.0564,
      "step": 31000
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.4666666666666667,
      "eval_loss": 1.0317378044128418,
      "eval_runtime": 54.0415,
      "eval_samples_per_second": 92.707,
      "eval_steps_per_second": 11.602,
      "step": 31000
    },
    {
      "epoch": 1.3,
      "learning_rate": 8.884444444444445e-07,
      "loss": 1.0541,
      "step": 32000
    },
    {
      "epoch": 1.3,
      "eval_accuracy": 0.47065868263473054,
      "eval_loss": 1.0293121337890625,
      "eval_runtime": 54.1157,
      "eval_samples_per_second": 92.579,
      "eval_steps_per_second": 11.586,
      "step": 32000
    },
    {
      "epoch": 1.34,
      "learning_rate": 9.161944444444445e-07,
      "loss": 1.0485,
      "step": 33000
    },
    {
      "epoch": 1.34,
      "eval_accuracy": 0.480439121756487,
      "eval_loss": 1.0269454717636108,
      "eval_runtime": 54.0627,
      "eval_samples_per_second": 92.67,
      "eval_steps_per_second": 11.598,
      "step": 33000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 10000000,
  "num_train_epochs": 408,
  "save_steps": 1000,
  "total_flos": 6.89814580077527e+16,
  "trial_name": null,
  "trial_params": null
}