{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.6916890080428955,
"eval_steps": 500,
"global_step": 3500,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.6702412868632708,
"grad_norm": 23.28346824645996,
"learning_rate": 9.798511296679954e-06,
"loss": 0.5531,
"step": 500
},
{
"epoch": 1.0,
"eval_accuracy": 0.8025477528572083,
"eval_loss": 0.4079710841178894,
"eval_runtime": 9.2612,
"eval_samples_per_second": 322.097,
"eval_steps_per_second": 20.192,
"step": 746
},
{
"epoch": 1.3404825737265416,
"grad_norm": 38.64032745361328,
"learning_rate": 9.094595255251797e-06,
"loss": 0.3957,
"step": 1000
},
{
"epoch": 2.0,
"eval_accuracy": 0.8276902437210083,
"eval_loss": 0.42224231362342834,
"eval_runtime": 9.5099,
"eval_samples_per_second": 313.673,
"eval_steps_per_second": 19.664,
"step": 1492
},
{
"epoch": 2.0107238605898123,
"grad_norm": 34.970176696777344,
"learning_rate": 8.390679213823639e-06,
"loss": 0.3185,
"step": 1500
},
{
"epoch": 2.680965147453083,
"grad_norm": 13.735482215881348,
"learning_rate": 7.686763172395482e-06,
"loss": 0.1887,
"step": 2000
},
{
"epoch": 3.0,
"eval_accuracy": 0.819309413433075,
"eval_loss": 0.49816903471946716,
"eval_runtime": 9.3819,
"eval_samples_per_second": 317.952,
"eval_steps_per_second": 19.932,
"step": 2238
},
{
"epoch": 3.351206434316354,
"grad_norm": 10.144265174865723,
"learning_rate": 6.982847130967324e-06,
"loss": 0.1522,
"step": 2500
},
{
"epoch": 4.0,
"eval_accuracy": 0.8313778042793274,
"eval_loss": 0.6922349333763123,
"eval_runtime": 9.2835,
"eval_samples_per_second": 321.324,
"eval_steps_per_second": 20.143,
"step": 2984
},
{
"epoch": 4.021447721179625,
"grad_norm": 39.181697845458984,
"learning_rate": 6.278931089539166e-06,
"loss": 0.1152,
"step": 3000
},
{
"epoch": 4.6916890080428955,
"grad_norm": 5.586261749267578,
"learning_rate": 5.575015048111009e-06,
"loss": 0.0669,
"step": 3500
}
],
"logging_steps": 500,
"max_steps": 7460,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 9234076239853272.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": {
"learning_rate": 1.0502427338108112e-05,
"per_device_train_batch_size": 16
}
}