{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.421265141318978,
  "eval_steps": 500,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.34,
      "learning_rate": 1.932705248990579e-05,
      "loss": 0.2243,
      "step": 500
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.8654104979811575e-05,
      "loss": 0.1513,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.15557065606117249,
      "eval_runtime": 122.4795,
      "eval_samples_per_second": 12.141,
      "eval_steps_per_second": 3.037,
      "step": 1486
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.7981157469717363e-05,
      "loss": 0.1319,
      "step": 1500
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.730820995962315e-05,
      "loss": 0.1161,
      "step": 2000
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.6635262449528936e-05,
      "loss": 0.1161,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.09562207758426666,
      "eval_runtime": 122.4632,
      "eval_samples_per_second": 12.142,
      "eval_steps_per_second": 3.038,
      "step": 2972
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.5962314939434724e-05,
      "loss": 0.1072,
      "step": 3000
    },
    {
      "epoch": 2.36,
      "learning_rate": 1.5289367429340512e-05,
      "loss": 0.0999,
      "step": 3500
    },
    {
      "epoch": 2.69,
      "learning_rate": 1.46164199192463e-05,
      "loss": 0.0765,
      "step": 4000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.11794405430555344,
      "eval_runtime": 122.4644,
      "eval_samples_per_second": 12.142,
      "eval_steps_per_second": 3.038,
      "step": 4458
    },
    {
      "epoch": 3.03,
      "learning_rate": 1.3943472409152087e-05,
      "loss": 0.0886,
      "step": 4500
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.3270524899057875e-05,
      "loss": 0.0793,
      "step": 5000
    },
    {
      "epoch": 3.7,
      "learning_rate": 1.2597577388963662e-05,
      "loss": 0.0798,
      "step": 5500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.11743835359811783,
      "eval_runtime": 122.4332,
      "eval_samples_per_second": 12.145,
      "eval_steps_per_second": 3.038,
      "step": 5944
    },
    {
      "epoch": 4.04,
      "learning_rate": 1.192462987886945e-05,
      "loss": 0.0737,
      "step": 6000
    },
    {
      "epoch": 4.37,
      "learning_rate": 1.1251682368775237e-05,
      "loss": 0.0739,
      "step": 6500
    },
    {
      "epoch": 4.71,
      "learning_rate": 1.0578734858681023e-05,
      "loss": 0.0705,
      "step": 7000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.09409211575984955,
      "eval_runtime": 122.4407,
      "eval_samples_per_second": 12.145,
      "eval_steps_per_second": 3.038,
      "step": 7430
    },
    {
      "epoch": 5.05,
      "learning_rate": 9.905787348586811e-06,
      "loss": 0.0668,
      "step": 7500
    },
    {
      "epoch": 5.38,
      "learning_rate": 9.2328398384926e-06,
      "loss": 0.0558,
      "step": 8000
    },
    {
      "epoch": 5.72,
      "learning_rate": 8.559892328398386e-06,
      "loss": 0.0609,
      "step": 8500
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.12117910385131836,
      "eval_runtime": 122.4449,
      "eval_samples_per_second": 12.144,
      "eval_steps_per_second": 3.038,
      "step": 8916
    },
    {
      "epoch": 6.06,
      "learning_rate": 7.886944818304172e-06,
      "loss": 0.0617,
      "step": 9000
    },
    {
      "epoch": 6.39,
      "learning_rate": 7.21399730820996e-06,
      "loss": 0.0537,
      "step": 9500
    },
    {
      "epoch": 6.73,
      "learning_rate": 6.541049798115747e-06,
      "loss": 0.0542,
      "step": 10000
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.10626289248466492,
      "eval_runtime": 122.4232,
      "eval_samples_per_second": 12.146,
      "eval_steps_per_second": 3.039,
      "step": 10402
    },
    {
      "epoch": 7.07,
      "learning_rate": 5.8681022880215346e-06,
      "loss": 0.0485,
      "step": 10500
    },
    {
      "epoch": 7.4,
      "learning_rate": 5.195154777927323e-06,
      "loss": 0.0522,
      "step": 11000
    },
    {
      "epoch": 7.74,
      "learning_rate": 4.522207267833109e-06,
      "loss": 0.0542,
      "step": 11500
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.13295996189117432,
      "eval_runtime": 122.4378,
      "eval_samples_per_second": 12.145,
      "eval_steps_per_second": 3.038,
      "step": 11888
    },
    {
      "epoch": 8.08,
      "learning_rate": 3.849259757738897e-06,
      "loss": 0.0545,
      "step": 12000
    },
    {
      "epoch": 8.41,
      "learning_rate": 3.176312247644684e-06,
      "loss": 0.0391,
      "step": 12500
    },
    {
      "epoch": 8.75,
      "learning_rate": 2.503364737550471e-06,
      "loss": 0.0418,
      "step": 13000
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.1395300030708313,
      "eval_runtime": 122.4262,
      "eval_samples_per_second": 12.146,
      "eval_steps_per_second": 3.039,
      "step": 13374
    },
    {
      "epoch": 9.08,
      "learning_rate": 1.8304172274562585e-06,
      "loss": 0.0385,
      "step": 13500
    },
    {
      "epoch": 9.42,
      "learning_rate": 1.157469717362046e-06,
      "loss": 0.0434,
      "step": 14000
    }
  ],
  "logging_steps": 500,
  "max_steps": 14860,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 2.52083994624e+17,
  "trial_name": null,
  "trial_params": null
}