{
  "best_metric": 0.5280066132545471,
  "best_model_checkpoint": "neurips-bert-combined2/checkpoint-100",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 1.5848274230957031,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.6679,
      "step": 2
    },
    {
      "epoch": 0.08,
      "grad_norm": 2.478803873062134,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.6854,
      "step": 4
    },
    {
      "epoch": 0.12,
      "grad_norm": 1.6922881603240967,
      "learning_rate": 2e-05,
      "loss": 0.7149,
      "step": 6
    },
    {
      "epoch": 0.16,
      "grad_norm": 2.9766438007354736,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.7152,
      "step": 8
    },
    {
      "epoch": 0.2,
      "grad_norm": 2.8051724433898926,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.7176,
      "step": 10
    },
    {
      "epoch": 0.24,
      "grad_norm": 2.277381658554077,
      "learning_rate": 4e-05,
      "loss": 0.6848,
      "step": 12
    },
    {
      "epoch": 0.28,
      "grad_norm": 1.7226181030273438,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.6969,
      "step": 14
    },
    {
      "epoch": 0.32,
      "grad_norm": 1.2681584358215332,
      "learning_rate": 4.962962962962963e-05,
      "loss": 0.7009,
      "step": 16
    },
    {
      "epoch": 0.36,
      "grad_norm": 2.7885658740997314,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.7162,
      "step": 18
    },
    {
      "epoch": 0.4,
      "grad_norm": 2.3086979389190674,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.7059,
      "step": 20
    },
    {
      "epoch": 0.44,
      "grad_norm": 1.9791280031204224,
      "learning_rate": 4.740740740740741e-05,
      "loss": 0.699,
      "step": 22
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.4085396528244019,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.6602,
      "step": 24
    },
    {
      "epoch": 0.52,
      "grad_norm": 2.003840208053589,
      "learning_rate": 4.592592592592593e-05,
      "loss": 0.6221,
      "step": 26
    },
    {
      "epoch": 0.56,
      "grad_norm": 2.16381573677063,
      "learning_rate": 4.518518518518519e-05,
      "loss": 0.6739,
      "step": 28
    },
    {
      "epoch": 0.6,
      "grad_norm": 2.6895177364349365,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.6952,
      "step": 30
    },
    {
      "epoch": 0.64,
      "grad_norm": 3.0539653301239014,
      "learning_rate": 4.3703703703703705e-05,
      "loss": 0.6916,
      "step": 32
    },
    {
      "epoch": 0.68,
      "grad_norm": 1.6905677318572998,
      "learning_rate": 4.296296296296296e-05,
      "loss": 0.7039,
      "step": 34
    },
    {
      "epoch": 0.72,
      "grad_norm": 1.9867430925369263,
      "learning_rate": 4.222222222222222e-05,
      "loss": 0.5762,
      "step": 36
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.075601816177368,
      "learning_rate": 4.148148148148148e-05,
      "loss": 0.6578,
      "step": 38
    },
    {
      "epoch": 0.8,
      "grad_norm": 2.779691219329834,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.6646,
      "step": 40
    },
    {
      "epoch": 0.84,
      "grad_norm": 2.750957489013672,
      "learning_rate": 4e-05,
      "loss": 0.5964,
      "step": 42
    },
    {
      "epoch": 0.88,
      "grad_norm": 2.7763702869415283,
      "learning_rate": 3.925925925925926e-05,
      "loss": 0.6092,
      "step": 44
    },
    {
      "epoch": 0.92,
      "grad_norm": 3.3457367420196533,
      "learning_rate": 3.851851851851852e-05,
      "loss": 0.5502,
      "step": 46
    },
    {
      "epoch": 0.96,
      "grad_norm": 2.7507972717285156,
      "learning_rate": 3.777777777777778e-05,
      "loss": 0.5042,
      "step": 48
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.478252410888672,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.6469,
      "step": 50
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7,
      "eval_auc": 0.8071999999999999,
      "eval_f1": 0.7169811320754716,
      "eval_loss": 0.5654460787773132,
      "eval_precision": 0.6785714285714286,
      "eval_recall": 0.76,
      "eval_runtime": 7.9595,
      "eval_samples_per_second": 12.564,
      "eval_steps_per_second": 0.879,
      "step": 50
    },
    {
      "epoch": 1.04,
      "grad_norm": 2.6288881301879883,
      "learning_rate": 3.62962962962963e-05,
      "loss": 0.3589,
      "step": 52
    },
    {
      "epoch": 1.08,
      "grad_norm": 4.38114595413208,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.408,
      "step": 54
    },
    {
      "epoch": 1.12,
      "grad_norm": 6.498011589050293,
      "learning_rate": 3.481481481481482e-05,
      "loss": 0.3517,
      "step": 56
    },
    {
      "epoch": 1.16,
      "grad_norm": 7.760722637176514,
      "learning_rate": 3.4074074074074077e-05,
      "loss": 0.3926,
      "step": 58
    },
    {
      "epoch": 1.2,
      "grad_norm": 8.327621459960938,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.2361,
      "step": 60
    },
    {
      "epoch": 1.24,
      "grad_norm": 3.809370756149292,
      "learning_rate": 3.25925925925926e-05,
      "loss": 0.3742,
      "step": 62
    },
    {
      "epoch": 1.28,
      "grad_norm": 12.8309965133667,
      "learning_rate": 3.185185185185185e-05,
      "loss": 0.576,
      "step": 64
    },
    {
      "epoch": 1.32,
      "grad_norm": 9.91649055480957,
      "learning_rate": 3.111111111111111e-05,
      "loss": 0.808,
      "step": 66
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 9.183283805847168,
      "learning_rate": 3.037037037037037e-05,
      "loss": 0.5811,
      "step": 68
    },
    {
      "epoch": 1.4,
      "grad_norm": 10.989977836608887,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.2667,
      "step": 70
    },
    {
      "epoch": 1.44,
      "grad_norm": 12.52010726928711,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 0.7402,
      "step": 72
    },
    {
      "epoch": 1.48,
      "grad_norm": 5.40288782119751,
      "learning_rate": 2.814814814814815e-05,
      "loss": 0.3061,
      "step": 74
    },
    {
      "epoch": 1.52,
      "grad_norm": 7.423550128936768,
      "learning_rate": 2.7407407407407408e-05,
      "loss": 0.4198,
      "step": 76
    },
    {
      "epoch": 1.56,
      "grad_norm": 9.907425880432129,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.4816,
      "step": 78
    },
    {
      "epoch": 1.6,
      "grad_norm": 7.145705699920654,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.3788,
      "step": 80
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 1.4431923627853394,
      "learning_rate": 2.5185185185185183e-05,
      "loss": 0.3837,
      "step": 82
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 8.11041259765625,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 0.5001,
      "step": 84
    },
    {
      "epoch": 1.72,
      "grad_norm": 8.253824234008789,
      "learning_rate": 2.3703703703703707e-05,
      "loss": 0.6369,
      "step": 86
    },
    {
      "epoch": 1.76,
      "grad_norm": 4.3440937995910645,
      "learning_rate": 2.2962962962962965e-05,
      "loss": 0.3467,
      "step": 88
    },
    {
      "epoch": 1.8,
      "grad_norm": 2.9534313678741455,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.2029,
      "step": 90
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 3.6825966835021973,
      "learning_rate": 2.148148148148148e-05,
      "loss": 0.3657,
      "step": 92
    },
    {
      "epoch": 1.88,
      "grad_norm": 8.141383171081543,
      "learning_rate": 2.074074074074074e-05,
      "loss": 0.5135,
      "step": 94
    },
    {
      "epoch": 1.92,
      "grad_norm": 23.226926803588867,
      "learning_rate": 2e-05,
      "loss": 0.8375,
      "step": 96
    },
    {
      "epoch": 1.96,
      "grad_norm": 7.735658645629883,
      "learning_rate": 1.925925925925926e-05,
      "loss": 0.3536,
      "step": 98
    },
    {
      "epoch": 2.0,
      "grad_norm": 11.800036430358887,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.294,
      "step": 100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.77,
      "eval_auc": 0.8512,
      "eval_f1": 0.7722772277227723,
      "eval_loss": 0.5280066132545471,
      "eval_precision": 0.7647058823529411,
      "eval_recall": 0.78,
      "eval_runtime": 7.9925,
      "eval_samples_per_second": 12.512,
      "eval_steps_per_second": 0.876,
      "step": 100
    }
  ],
  "logging_steps": 2,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 26493479731200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}