{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 3726,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 6.81213097155126e-05,
      "loss": 2.26,
      "step": 100
    },
    {
      "epoch": 0.16,
      "learning_rate": 6.624261943102522e-05,
      "loss": 1.42,
      "step": 200
    },
    {
      "epoch": 0.24,
      "learning_rate": 6.436392914653783e-05,
      "loss": 1.28,
      "step": 300
    },
    {
      "epoch": 0.32,
      "learning_rate": 6.248523886205046e-05,
      "loss": 1.215,
      "step": 400
    },
    {
      "epoch": 0.4,
      "learning_rate": 6.0606548577563064e-05,
      "loss": 1.15,
      "step": 500
    },
    {
      "epoch": 0.48,
      "learning_rate": 5.872785829307568e-05,
      "loss": 1.085,
      "step": 600
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.6849168008588294e-05,
      "loss": 1.09,
      "step": 700
    },
    {
      "epoch": 0.64,
      "learning_rate": 5.497047772410091e-05,
      "loss": 1.07,
      "step": 800
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.3091787439613525e-05,
      "loss": 1.035,
      "step": 900
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.121309715512614e-05,
      "loss": 1.03,
      "step": 1000
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.933440687063875e-05,
      "loss": 0.99,
      "step": 1100
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.7455716586151364e-05,
      "loss": 0.97,
      "step": 1200
    },
    {
      "epoch": 1.0,
      "eval_HasAns_exact": 67.12804646365956,
      "eval_HasAns_f1": 73.77021174615072,
      "eval_HasAns_total": 18423,
      "eval_NoAns_exact": 71.52285101468405,
      "eval_NoAns_f1": 71.52285101468405,
      "eval_NoAns_total": 6061,
      "eval_best_exact": 68.21597778140827,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 73.21387890047849,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 68.21597778140827,
      "eval_f1": 73.21387890047855,
      "eval_total": 24484,
      "step": 1242
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.557702630166398e-05,
      "loss": 0.895,
      "step": 1300
    },
    {
      "epoch": 1.13,
      "learning_rate": 4.3698336017176595e-05,
      "loss": 0.83,
      "step": 1400
    },
    {
      "epoch": 1.21,
      "learning_rate": 4.18196457326892e-05,
      "loss": 0.845,
      "step": 1500
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.994095544820182e-05,
      "loss": 0.845,
      "step": 1600
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.8062265163714434e-05,
      "loss": 0.84,
      "step": 1700
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.618357487922705e-05,
      "loss": 0.825,
      "step": 1800
    },
    {
      "epoch": 1.53,
      "learning_rate": 3.4304884594739665e-05,
      "loss": 0.835,
      "step": 1900
    },
    {
      "epoch": 1.61,
      "learning_rate": 3.242619431025228e-05,
      "loss": 0.825,
      "step": 2000
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.0547504025764895e-05,
      "loss": 0.825,
      "step": 2100
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.8668813741277507e-05,
      "loss": 0.795,
      "step": 2200
    },
    {
      "epoch": 1.85,
      "learning_rate": 2.679012345679012e-05,
      "loss": 0.8,
      "step": 2300
    },
    {
      "epoch": 1.93,
      "learning_rate": 2.4911433172302738e-05,
      "loss": 0.795,
      "step": 2400
    },
    {
      "epoch": 2.0,
      "eval_HasAns_exact": 68.19736199316073,
      "eval_HasAns_f1": 74.61586025418268,
      "eval_HasAns_total": 18423,
      "eval_NoAns_exact": 76.17554858934169,
      "eval_NoAns_f1": 76.17554858934169,
      "eval_NoAns_total": 6061,
      "eval_best_exact": 70.17235745793171,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 75.00196019697727,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 70.17235745793171,
      "eval_f1": 75.00196019697732,
      "eval_total": 24484,
      "step": 2484
    },
    {
      "epoch": 2.01,
      "learning_rate": 2.303274288781535e-05,
      "loss": 0.795,
      "step": 2500
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.1154052603327965e-05,
      "loss": 0.67,
      "step": 2600
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.9275362318840577e-05,
      "loss": 0.665,
      "step": 2700
    },
    {
      "epoch": 2.25,
      "learning_rate": 1.7396672034353192e-05,
      "loss": 0.68,
      "step": 2800
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.5517981749865808e-05,
      "loss": 0.675,
      "step": 2900
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.3639291465378421e-05,
      "loss": 0.665,
      "step": 3000
    },
    {
      "epoch": 2.5,
      "learning_rate": 1.1760601180891035e-05,
      "loss": 0.655,
      "step": 3100
    },
    {
      "epoch": 2.58,
      "learning_rate": 9.881910896403648e-06,
      "loss": 0.66,
      "step": 3200
    },
    {
      "epoch": 2.66,
      "learning_rate": 8.003220611916264e-06,
      "loss": 0.65,
      "step": 3300
    },
    {
      "epoch": 2.74,
      "learning_rate": 6.124530327428877e-06,
      "loss": 0.65,
      "step": 3400
    },
    {
      "epoch": 2.82,
      "learning_rate": 4.245840042941492e-06,
      "loss": 0.65,
      "step": 3500
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.367149758454106e-06,
      "loss": 0.65,
      "step": 3600
    },
    {
      "epoch": 2.98,
      "learning_rate": 4.884594739667203e-07,
      "loss": 0.645,
      "step": 3700
    },
    {
      "epoch": 3.0,
      "eval_HasAns_exact": 69.65206535309125,
      "eval_HasAns_f1": 76.1771308342428,
      "eval_HasAns_total": 18423,
      "eval_NoAns_exact": 77.6604520706154,
      "eval_NoAns_f1": 77.6604520706154,
      "eval_NoAns_total": 6061,
      "eval_best_exact": 71.63453684038556,
      "eval_best_exact_thresh": 0.0,
      "eval_best_f1": 76.5443261460238,
      "eval_best_f1_thresh": 0.0,
      "eval_exact": 71.63453684038556,
      "eval_f1": 76.54432614602389,
      "eval_total": 24484,
      "step": 3726
    },
    {
      "epoch": 3.0,
      "step": 3726,
      "total_flos": 1.8682613172115866e+17,
      "train_loss": 0.8973765432098766,
      "train_runtime": 1588.8114,
      "train_samples_per_second": 614.369,
      "train_steps_per_second": 2.401
    }
  ],
  "max_steps": 3726,
  "num_train_epochs": 3,
  "total_flos": 1.8682613172115866e+17,
  "trial_name": null,
  "trial_params": null
}