{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6135483870967742,
      "eval_loss": 0.23216405510902405,
      "eval_runtime": 14.5556,
      "eval_samples_per_second": 212.976,
      "eval_steps_per_second": 26.656,
      "step": 318
    },
    {
      "epoch": 1.5723270440251573,
      "grad_norm": 0.5930498838424683,
      "learning_rate": 1.606918238993711e-05,
      "loss": 0.3696,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8325806451612904,
      "eval_loss": 0.1069631278514862,
      "eval_runtime": 15.7006,
      "eval_samples_per_second": 197.444,
      "eval_steps_per_second": 24.712,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8890322580645161,
      "eval_loss": 0.0675421878695488,
      "eval_runtime": 15.266,
      "eval_samples_per_second": 203.065,
      "eval_steps_per_second": 25.416,
      "step": 954
    },
    {
      "epoch": 3.1446540880503147,
      "grad_norm": 0.593765914440155,
      "learning_rate": 1.2138364779874214e-05,
      "loss": 0.1241,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9048387096774193,
      "eval_loss": 0.05093123018741608,
      "eval_runtime": 14.5589,
      "eval_samples_per_second": 212.928,
      "eval_steps_per_second": 26.65,
      "step": 1272
    },
    {
      "epoch": 4.716981132075472,
      "grad_norm": 0.48284056782722473,
      "learning_rate": 8.207547169811321e-06,
      "loss": 0.0767,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9083870967741936,
      "eval_loss": 0.0422496534883976,
      "eval_runtime": 15.2188,
      "eval_samples_per_second": 203.695,
      "eval_steps_per_second": 25.495,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.915483870967742,
      "eval_loss": 0.037340711802244186,
      "eval_runtime": 15.6107,
      "eval_samples_per_second": 198.581,
      "eval_steps_per_second": 24.855,
      "step": 1908
    },
    {
      "epoch": 6.289308176100629,
      "grad_norm": 0.24097558856010437,
      "learning_rate": 4.276729559748428e-06,
      "loss": 0.0607,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2544,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "total_flos": 520991326672152.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.3077189989743373,
    "num_train_epochs": 8,
    "temperature": 5
  }
}
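
The `trial_params` and `log_history` fields above can be summarized without reloading the Trainer; a minimal sketch, assuming the file is read as a local `trainer_state.json` (the path is illustrative, keys are taken from the record above):

```python
# Minimal sketch: summarize a trainer_state.json like the one above.
# The path is illustrative; point it at the actual checkpoint directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Hyperparameters sampled for this hyperparameter-search trial.
print("trial_params:", state["trial_params"])

# Per-epoch evaluation metrics from log_history; training-loss entries
# (those without eval_accuracy) are skipped.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}  step {entry['step']:>5}  "
              f"eval_accuracy={entry['eval_accuracy']:.4f}  "
              f"eval_loss={entry['eval_loss']:.4f}")
```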