{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.04289544235925,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6661079525947571,
      "eval_loss": 0.5971955060958862,
      "eval_runtime": 15.5437,
      "eval_samples_per_second": 191.91,
      "eval_steps_per_second": 12.031,
      "step": 373
    },
    {
      "epoch": 1.3404825737265416,
      "grad_norm": 10.565417289733887,
      "learning_rate": 1.8562171591313335e-05,
      "loss": 0.6088,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.68890380859375,
      "eval_loss": 0.554870069026947,
      "eval_runtime": 20.9818,
      "eval_samples_per_second": 142.171,
      "eval_steps_per_second": 8.912,
      "step": 746
    },
    {
      "epoch": 2.680965147453083,
      "grad_norm": 12.369528770446777,
      "learning_rate": 1.5688770416187432e-05,
      "loss": 0.4913,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6895742416381836,
      "eval_loss": 0.6084151268005371,
      "eval_runtime": 21.3765,
      "eval_samples_per_second": 139.546,
      "eval_steps_per_second": 8.748,
      "step": 1119
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6959436535835266,
      "eval_loss": 0.6611299514770508,
      "eval_runtime": 14.7723,
      "eval_samples_per_second": 201.931,
      "eval_steps_per_second": 12.659,
      "step": 1492
    },
    {
      "epoch": 4.021447721179625,
      "grad_norm": 14.231189727783203,
      "learning_rate": 1.2815369241061527e-05,
      "loss": 0.3834,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7039892673492432,
      "eval_loss": 0.6887115240097046,
      "eval_runtime": 8.0268,
      "eval_samples_per_second": 371.63,
      "eval_steps_per_second": 23.297,
      "step": 1865
    },
    {
      "epoch": 5.361930294906166,
      "grad_norm": 17.718364715576172,
      "learning_rate": 9.941968065935624e-06,
      "loss": 0.2616,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7019778490066528,
      "eval_loss": 0.7845392227172852,
      "eval_runtime": 57.529,
      "eval_samples_per_second": 51.852,
      "eval_steps_per_second": 3.251,
      "step": 2238
    },
    {
      "epoch": 6.702412868632708,
      "grad_norm": 17.930471420288086,
      "learning_rate": 7.0685668908097215e-06,
      "loss": 0.1949,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6979550719261169,
      "eval_loss": 0.8831968307495117,
      "eval_runtime": 50.8363,
      "eval_samples_per_second": 58.679,
      "eval_steps_per_second": 3.678,
      "step": 2611
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7003017067909241,
      "eval_loss": 0.9950036406517029,
      "eval_runtime": 50.3735,
      "eval_samples_per_second": 59.218,
      "eval_steps_per_second": 3.712,
      "step": 2984
    },
    {
      "epoch": 8.04289544235925,
      "grad_norm": 12.573803901672363,
      "learning_rate": 4.1951657156838185e-06,
      "loss": 0.1515,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3730,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4527074893596984.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 2.1435572766439238e-05,
    "per_device_train_batch_size": 32
  }
}