{
  "best_metric": 0.22196684777736664,
  "best_model_checkpoint": "sci_cite2/checkpoint-16134",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 16134,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 9.845047725300608e-06,
      "loss": 0.4454,
      "step": 500
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.690095450601216e-06,
      "loss": 0.3583,
      "step": 1000
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.535143175901823e-06,
      "loss": 0.3288,
      "step": 1500
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.38019090120243e-06,
      "loss": 0.3102,
      "step": 2000
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.225238626503039e-06,
      "loss": 0.3006,
      "step": 2500
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.070286351803646e-06,
      "loss": 0.2881,
      "step": 3000
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.915334077104253e-06,
      "loss": 0.2824,
      "step": 3500
    },
    {
      "epoch": 0.5,
      "learning_rate": 8.76038180240486e-06,
      "loss": 0.2765,
      "step": 4000
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.605429527705468e-06,
      "loss": 0.2682,
      "step": 4500
    },
    {
      "epoch": 0.62,
      "learning_rate": 8.450477253006075e-06,
      "loss": 0.2638,
      "step": 5000
    },
    {
      "epoch": 0.68,
      "learning_rate": 8.295524978306682e-06,
      "loss": 0.259,
      "step": 5500
    },
    {
      "epoch": 0.74,
      "learning_rate": 8.14057270360729e-06,
      "loss": 0.257,
      "step": 6000
    },
    {
      "epoch": 0.81,
      "learning_rate": 7.985620428907898e-06,
      "loss": 0.2526,
      "step": 6500
    },
    {
      "epoch": 0.87,
      "learning_rate": 7.830668154208505e-06,
      "loss": 0.248,
      "step": 7000
    },
    {
      "epoch": 0.93,
      "learning_rate": 7.675715879509111e-06,
      "loss": 0.2485,
      "step": 7500
    },
    {
      "epoch": 0.99,
      "learning_rate": 7.520763604809719e-06,
      "loss": 0.2418,
      "step": 8000
    },
    {
      "epoch": 1.0,
      "step": 8067,
      "train_accuracy": 0.8732977099630939,
      "train_f1": 0.8722354031725915,
      "train_loss": 0.213734969496727,
      "train_precision": 0.8861924332063127,
      "train_recall": 0.8732977099630939,
      "train_runtime": 2526.3719,
      "train_samples_per_second": 306.526,
      "train_steps_per_second": 3.193
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8648553719008264,
      "eval_f1": 0.863761374500173,
      "eval_loss": 0.2356892228126526,
      "eval_precision": 0.8767616410024536,
      "eval_recall": 0.8648553719008264,
      "eval_runtime": 633.1197,
      "eval_samples_per_second": 305.787,
      "eval_steps_per_second": 3.186,
      "step": 8067
    },
    {
      "epoch": 1.05,
      "learning_rate": 7.365811330110326e-06,
      "loss": 0.222,
      "step": 8500
    },
    {
      "epoch": 1.12,
      "learning_rate": 7.210859055410934e-06,
      "loss": 0.2236,
      "step": 9000
    },
    {
      "epoch": 1.18,
      "learning_rate": 7.055906780711541e-06,
      "loss": 0.2179,
      "step": 9500
    },
    {
      "epoch": 1.24,
      "learning_rate": 6.900954506012149e-06,
      "loss": 0.2204,
      "step": 10000
    },
    {
      "epoch": 1.3,
      "learning_rate": 6.746002231312756e-06,
      "loss": 0.2182,
      "step": 10500
    },
    {
      "epoch": 1.36,
      "learning_rate": 6.591049956613363e-06,
      "loss": 0.2172,
      "step": 11000
    },
    {
      "epoch": 1.43,
      "learning_rate": 6.436097681913971e-06,
      "loss": 0.2094,
      "step": 11500
    },
    {
      "epoch": 1.49,
      "learning_rate": 6.281145407214578e-06,
      "loss": 0.2139,
      "step": 12000
    },
    {
      "epoch": 1.55,
      "learning_rate": 6.126193132515186e-06,
      "loss": 0.2105,
      "step": 12500
    },
    {
      "epoch": 1.61,
      "learning_rate": 5.9712408578157935e-06,
      "loss": 0.211,
      "step": 13000
    },
    {
      "epoch": 1.67,
      "learning_rate": 5.8162885831164004e-06,
      "loss": 0.2137,
      "step": 13500
    },
    {
      "epoch": 1.74,
      "learning_rate": 5.661336308417008e-06,
      "loss": 0.2132,
      "step": 14000
    },
    {
      "epoch": 1.8,
      "learning_rate": 5.506384033717615e-06,
      "loss": 0.2108,
      "step": 14500
    },
    {
      "epoch": 1.86,
      "learning_rate": 5.351431759018223e-06,
      "loss": 0.21,
      "step": 15000
    },
    {
      "epoch": 1.92,
      "learning_rate": 5.196479484318831e-06,
      "loss": 0.2109,
      "step": 15500
    },
    {
      "epoch": 1.98,
      "learning_rate": 5.041527209619438e-06,
      "loss": 0.2094,
      "step": 16000
    },
    {
      "epoch": 2.0,
      "step": 16134,
      "train_accuracy": 0.8852553854736195,
      "train_f1": 0.8841428775695753,
      "train_loss": 0.18161344528198242,
      "train_precision": 0.9007014174019812,
      "train_recall": 0.8852553854736195,
      "train_runtime": 2519.4245,
      "train_samples_per_second": 307.371,
      "train_steps_per_second": 3.202
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8720867768595041,
      "eval_f1": 0.8708637537852376,
      "eval_loss": 0.22196684777736664,
      "eval_precision": 0.8865148727683396,
      "eval_recall": 0.8720867768595041,
      "eval_runtime": 636.0409,
      "eval_samples_per_second": 304.383,
      "eval_steps_per_second": 3.171,
      "step": 16134
    }
  ],
  "logging_steps": 500,
  "max_steps": 32268,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 4.0750535009734656e+17,
  "trial_name": null,
  "trial_params": null
}