|
{
  "best_metric": 0.8472222222222222,
  "best_model_checkpoint": "lungcancer_model_deit/checkpoint-84",
  "epoch": 12.987012987012987,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 9.56179428100586,
      "learning_rate": 4.666666666666667e-05,
      "loss": 1.3622,
      "step": 7
    },
    {
      "epoch": 0.9090909090909091,
      "eval_accuracy": 0.4583333333333333,
      "eval_loss": 1.1259331703186035,
      "eval_runtime": 1.0658,
      "eval_samples_per_second": 67.556,
      "eval_steps_per_second": 8.444,
      "step": 7
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 4.5356645584106445,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.735,
      "step": 14
    },
    {
      "epoch": 1.948051948051948,
      "eval_accuracy": 0.5,
      "eval_loss": 0.9460827708244324,
      "eval_runtime": 1.2329,
      "eval_samples_per_second": 58.401,
      "eval_steps_per_second": 7.3,
      "step": 15
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 11.363588333129883,
      "learning_rate": 4e-05,
      "loss": 0.5287,
      "step": 21
    },
    {
      "epoch": 2.987012987012987,
      "eval_accuracy": 0.5972222222222222,
      "eval_loss": 0.9006068706512451,
      "eval_runtime": 1.1547,
      "eval_samples_per_second": 62.353,
      "eval_steps_per_second": 7.794,
      "step": 23
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 9.22575855255127,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.2822,
      "step": 28
    },
    {
      "epoch": 3.896103896103896,
      "eval_accuracy": 0.6666666666666666,
      "eval_loss": 0.7236449718475342,
      "eval_runtime": 1.0624,
      "eval_samples_per_second": 67.772,
      "eval_steps_per_second": 8.471,
      "step": 30
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 10.75616455078125,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1459,
      "step": 35
    },
    {
      "epoch": 4.935064935064935,
      "eval_accuracy": 0.7777777777777778,
      "eval_loss": 0.5326564908027649,
      "eval_runtime": 1.0763,
      "eval_samples_per_second": 66.898,
      "eval_steps_per_second": 8.362,
      "step": 38
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 2.9255380630493164,
      "learning_rate": 3e-05,
      "loss": 0.0687,
      "step": 42
    },
    {
      "epoch": 5.974025974025974,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.42256489396095276,
      "eval_runtime": 1.9017,
      "eval_samples_per_second": 37.86,
      "eval_steps_per_second": 4.733,
      "step": 46
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 2.1261632442474365,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.0269,
      "step": 49
    },
    {
      "epoch": 6.883116883116883,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.4134489595890045,
      "eval_runtime": 1.0782,
      "eval_samples_per_second": 66.781,
      "eval_steps_per_second": 8.348,
      "step": 53
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 0.20565827190876007,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.0139,
      "step": 56
    },
    {
      "epoch": 7.922077922077922,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.3836323916912079,
      "eval_runtime": 1.0936,
      "eval_samples_per_second": 65.839,
      "eval_steps_per_second": 8.23,
      "step": 61
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 0.11971937119960785,
      "learning_rate": 2e-05,
      "loss": 0.0077,
      "step": 63
    },
    {
      "epoch": 8.96103896103896,
      "eval_accuracy": 0.8194444444444444,
      "eval_loss": 0.5306501984596252,
      "eval_runtime": 1.071,
      "eval_samples_per_second": 67.228,
      "eval_steps_per_second": 8.404,
      "step": 69
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 0.09062337130308151,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0054,
      "step": 70
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.041697677224874496,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.0042,
      "step": 77
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.4780552089214325,
      "eval_runtime": 1.1093,
      "eval_samples_per_second": 64.905,
      "eval_steps_per_second": 8.113,
      "step": 77
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 0.0230174008756876,
      "learning_rate": 1e-05,
      "loss": 0.0038,
      "step": 84
    },
    {
      "epoch": 10.909090909090908,
      "eval_accuracy": 0.8472222222222222,
      "eval_loss": 0.41819339990615845,
      "eval_runtime": 1.0796,
      "eval_samples_per_second": 66.694,
      "eval_steps_per_second": 8.337,
      "step": 84
    },
    {
      "epoch": 11.818181818181818,
      "grad_norm": 0.024311551824212074,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0039,
      "step": 91
    },
    {
      "epoch": 11.948051948051948,
      "eval_accuracy": 0.8472222222222222,
      "eval_loss": 0.39705508947372437,
      "eval_runtime": 1.0729,
      "eval_samples_per_second": 67.106,
      "eval_steps_per_second": 8.388,
      "step": 92
    },
    {
      "epoch": 12.727272727272727,
      "grad_norm": 0.022643834352493286,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0018,
      "step": 98
    },
    {
      "epoch": 12.987012987012987,
      "eval_accuracy": 0.8472222222222222,
      "eval_loss": 0.39274024963378906,
      "eval_runtime": 1.0807,
      "eval_samples_per_second": 66.626,
      "eval_steps_per_second": 8.328,
      "step": 100
    }
  ],
  "logging_steps": 7,
  "max_steps": 105,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.175723941563904e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|