{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.1880341880341883,
  "eval_steps": 100,
  "global_step": 2800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07814407814407814,
      "grad_norm": 0.389704167842865,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.4124,
      "step": 100
    },
    {
      "epoch": 0.07814407814407814,
      "eval_loss": 2.2181031703948975,
      "eval_runtime": 3367.0243,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 100
    },
    {
      "epoch": 0.1562881562881563,
      "grad_norm": 0.27927058935165405,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.2928,
      "step": 200
    },
    {
      "epoch": 0.1562881562881563,
      "eval_loss": 2.0591659545898438,
      "eval_runtime": 3366.419,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 200
    },
    {
      "epoch": 0.23443223443223443,
      "grad_norm": 0.22626566886901855,
      "learning_rate": 1.2e-05,
      "loss": 2.1104,
      "step": 300
    },
    {
      "epoch": 0.23443223443223443,
      "eval_loss": 1.8923490047454834,
      "eval_runtime": 3360.9485,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 300
    },
    {
      "epoch": 0.3125763125763126,
      "grad_norm": 0.210145503282547,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.9296,
      "step": 400
    },
    {
      "epoch": 0.3125763125763126,
      "eval_loss": 1.7505871057510376,
      "eval_runtime": 3357.6413,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 400
    },
    {
      "epoch": 0.3907203907203907,
      "grad_norm": 0.2200106978416443,
      "learning_rate": 2e-05,
      "loss": 1.8345,
      "step": 500
    },
    {
      "epoch": 0.3907203907203907,
      "eval_loss": 1.7071024179458618,
      "eval_runtime": 3361.7985,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 500
    },
    {
      "epoch": 0.46886446886446886,
      "grad_norm": 0.22381120920181274,
      "learning_rate": 1.9980028422948323e-05,
      "loss": 1.8103,
      "step": 600
    },
    {
      "epoch": 0.46886446886446886,
      "eval_loss": 1.6838117837905884,
      "eval_runtime": 3360.7216,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 600
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.2300158590078354,
      "learning_rate": 1.9920193464571277e-05,
      "loss": 1.7827,
      "step": 700
    },
    {
      "epoch": 0.5470085470085471,
      "eval_loss": 1.6691471338272095,
      "eval_runtime": 3362.2,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 700
    },
    {
      "epoch": 0.6251526251526252,
      "grad_norm": 0.25236302614212036,
      "learning_rate": 1.982073412456518e-05,
      "loss": 1.765,
      "step": 800
    },
    {
      "epoch": 0.6251526251526252,
      "eval_loss": 1.6575521230697632,
      "eval_runtime": 3363.3293,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 800
    },
    {
      "epoch": 0.7032967032967034,
      "grad_norm": 0.26079344749450684,
      "learning_rate": 1.9682047674904527e-05,
      "loss": 1.7425,
      "step": 900
    },
    {
      "epoch": 0.7032967032967034,
      "eval_loss": 1.6489633321762085,
      "eval_runtime": 3359.1797,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 900
    },
    {
      "epoch": 0.7814407814407814,
      "grad_norm": 0.2565068304538727,
      "learning_rate": 1.9504688073012397e-05,
      "loss": 1.7521,
      "step": 1000
    },
    {
      "epoch": 0.7814407814407814,
      "eval_loss": 1.6409879922866821,
      "eval_runtime": 3358.9412,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 1000
    },
    {
      "epoch": 0.8595848595848596,
      "grad_norm": 0.26189491152763367,
      "learning_rate": 1.9289363749079798e-05,
      "loss": 1.7326,
      "step": 1100
    },
    {
      "epoch": 0.8595848595848596,
      "eval_loss": 1.6336146593093872,
      "eval_runtime": 3363.4368,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 1100
    },
    {
      "epoch": 0.9377289377289377,
      "grad_norm": 0.2778639495372772,
      "learning_rate": 1.903693477637204e-05,
      "loss": 1.7298,
      "step": 1200
    },
    {
      "epoch": 0.9377289377289377,
      "eval_loss": 1.627331256866455,
      "eval_runtime": 3361.7893,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 1200
    },
    {
      "epoch": 1.0158730158730158,
      "grad_norm": 0.2954370975494385,
      "learning_rate": 1.874840943582482e-05,
      "loss": 1.7128,
      "step": 1300
    },
    {
      "epoch": 1.0158730158730158,
      "eval_loss": 1.62191641330719,
      "eval_runtime": 3358.2318,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 1300
    },
    {
      "epoch": 1.0940170940170941,
      "grad_norm": 0.2985123097896576,
      "learning_rate": 1.842494018865216e-05,
      "loss": 1.7265,
      "step": 1400
    },
    {
      "epoch": 1.0940170940170941,
      "eval_loss": 1.616808295249939,
      "eval_runtime": 3366.219,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 1400
    },
    {
      "epoch": 1.1721611721611722,
      "grad_norm": 0.3037821054458618,
      "learning_rate": 1.8067819073052813e-05,
      "loss": 1.7148,
      "step": 1500
    },
    {
      "epoch": 1.1721611721611722,
      "eval_loss": 1.612409234046936,
      "eval_runtime": 3364.8773,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 1500
    },
    {
      "epoch": 1.2503052503052503,
      "grad_norm": 0.3078468441963196,
      "learning_rate": 1.7678472543402166e-05,
      "loss": 1.7003,
      "step": 1600
    },
    {
      "epoch": 1.2503052503052503,
      "eval_loss": 1.607498049736023,
      "eval_runtime": 3366.2319,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 1600
    },
    {
      "epoch": 1.3284493284493284,
      "grad_norm": 0.3602350056171417,
      "learning_rate": 1.7258455772543573e-05,
      "loss": 1.6981,
      "step": 1700
    },
    {
      "epoch": 1.3284493284493284,
      "eval_loss": 1.6040035486221313,
      "eval_runtime": 3369.5225,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 1700
    },
    {
      "epoch": 1.4065934065934065,
      "grad_norm": 0.34948959946632385,
      "learning_rate": 1.6809446439937472e-05,
      "loss": 1.703,
      "step": 1800
    },
    {
      "epoch": 1.4065934065934065,
      "eval_loss": 1.6002304553985596,
      "eval_runtime": 3359.2322,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 1800
    },
    {
      "epoch": 1.4847374847374848,
      "grad_norm": 0.33941686153411865,
      "learning_rate": 1.6333238030480473e-05,
      "loss": 1.6842,
      "step": 1900
    },
    {
      "epoch": 1.4847374847374848,
      "eval_loss": 1.5976512432098389,
      "eval_runtime": 3363.9968,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 1900
    },
    {
      "epoch": 1.5628815628815629,
      "grad_norm": 0.3594741225242615,
      "learning_rate": 1.5831732670761e-05,
      "loss": 1.6813,
      "step": 2000
    },
    {
      "epoch": 1.5628815628815629,
      "eval_loss": 1.5942325592041016,
      "eval_runtime": 3356.6247,
      "eval_samples_per_second": 0.753,
      "eval_steps_per_second": 0.094,
      "step": 2000
    },
    {
      "epoch": 1.641025641025641,
      "grad_norm": 0.3513031303882599,
      "learning_rate": 1.5306933531365748e-05,
      "loss": 1.6747,
      "step": 2100
    },
    {
      "epoch": 1.641025641025641,
      "eval_loss": 1.591470718383789,
      "eval_runtime": 3360.2103,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 2100
    },
    {
      "epoch": 1.7191697191697193,
      "grad_norm": 0.3548774719238281,
      "learning_rate": 1.4760936825584535e-05,
      "loss": 1.6821,
      "step": 2200
    },
    {
      "epoch": 1.7191697191697193,
      "eval_loss": 1.5890487432479858,
      "eval_runtime": 3360.7245,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 2200
    },
    {
      "epoch": 1.7973137973137974,
      "grad_norm": 0.37473219633102417,
      "learning_rate": 1.4195923436473257e-05,
      "loss": 1.685,
      "step": 2300
    },
    {
      "epoch": 1.7973137973137974,
      "eval_loss": 1.586594820022583,
      "eval_runtime": 3365.8879,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 2300
    },
    {
      "epoch": 1.8754578754578755,
      "grad_norm": 0.37071430683135986,
      "learning_rate": 1.3614150205719086e-05,
      "loss": 1.6602,
      "step": 2400
    },
    {
      "epoch": 1.8754578754578755,
      "eval_loss": 1.5838119983673096,
      "eval_runtime": 3363.0708,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 2400
    },
    {
      "epoch": 1.9536019536019538,
      "grad_norm": 0.4014996588230133,
      "learning_rate": 1.3017940919102943e-05,
      "loss": 1.682,
      "step": 2500
    },
    {
      "epoch": 1.9536019536019538,
      "eval_loss": 1.5820069313049316,
      "eval_runtime": 3367.2545,
      "eval_samples_per_second": 0.75,
      "eval_steps_per_second": 0.094,
      "step": 2500
    },
    {
      "epoch": 2.0317460317460316,
      "grad_norm": 0.3767947256565094,
      "learning_rate": 1.2409677024566145e-05,
      "loss": 1.6595,
      "step": 2600
    },
    {
      "epoch": 2.0317460317460316,
      "eval_loss": 1.5797280073165894,
      "eval_runtime": 3364.0566,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 2600
    },
    {
      "epoch": 2.10989010989011,
      "grad_norm": 0.3852591812610626,
      "learning_rate": 1.1791788119956191e-05,
      "loss": 1.6606,
      "step": 2700
    },
    {
      "epoch": 2.10989010989011,
      "eval_loss": 1.578330636024475,
      "eval_runtime": 3359.3638,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.094,
      "step": 2700
    },
    {
      "epoch": 2.1880341880341883,
      "grad_norm": 0.3954846262931824,
      "learning_rate": 1.116674224844664e-05,
      "loss": 1.6611,
      "step": 2800
    },
    {
      "epoch": 2.1880341880341883,
      "eval_loss": 1.5769294500350952,
      "eval_runtime": 3365.2078,
      "eval_samples_per_second": 0.751,
      "eval_steps_per_second": 0.094,
      "step": 2800
    }
  ],
  "logging_steps": 100,
  "max_steps": 5470,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "total_flos": 1.141344360972288e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}