{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 18.997333333333334,
  "eval_steps": 500,
  "global_step": 3562,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.53,
      "learning_rate": 5e-05,
      "loss": 1.3988,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6660784313725491,
      "eval_loss": 1.2107256650924683,
      "eval_runtime": 7.728,
      "eval_samples_per_second": 64.7,
      "eval_steps_per_second": 8.152,
      "step": 187
    },
    {
      "epoch": 1.0,
      "eval_exact_match": 19.4,
      "eval_f1": 27.911959489312448,
      "eval_qa_bleu": 2.842200543476061,
      "eval_qa_exact_match": 0.152,
      "eval_recite_bleu": 12.149078895446083,
      "eval_recite_exact_match": 0.0,
      "step": 187
    },
    {
      "epoch": 1.07,
      "learning_rate": 5e-05,
      "loss": 1.238,
      "step": 200
    },
    {
      "epoch": 1.6,
      "learning_rate": 5e-05,
      "loss": 1.1977,
      "step": 300
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6676078431372549,
      "eval_loss": 1.1987448930740356,
      "eval_runtime": 7.4628,
      "eval_samples_per_second": 66.999,
      "eval_steps_per_second": 8.442,
      "step": 375
    },
    {
      "epoch": 2.0,
      "eval_exact_match": 21.4,
      "eval_f1": 27.979969946667268,
      "eval_qa_bleu": 3.4273838915653356,
      "eval_qa_exact_match": 0.158,
      "eval_recite_bleu": 11.474877690710539,
      "eval_recite_exact_match": 0.0,
      "step": 375
    },
    {
      "epoch": 2.13,
      "learning_rate": 5e-05,
      "loss": 1.1628,
      "step": 400
    },
    {
      "epoch": 2.67,
      "learning_rate": 5e-05,
      "loss": 1.1458,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6675764705882353,
      "eval_loss": 1.1964411735534668,
      "eval_runtime": 6.6677,
      "eval_samples_per_second": 74.988,
      "eval_steps_per_second": 9.448,
      "step": 562
    },
    {
      "epoch": 3.0,
      "eval_exact_match": 23.4,
      "eval_f1": 31.525761083856903,
      "eval_qa_bleu": 2.0606924929592334,
      "eval_qa_exact_match": 0.17,
      "eval_recite_bleu": 12.027903845545522,
      "eval_recite_exact_match": 0.0,
      "step": 562
    },
    {
      "epoch": 3.2,
      "learning_rate": 5e-05,
      "loss": 1.1179,
      "step": 600
    },
    {
      "epoch": 3.73,
      "learning_rate": 5e-05,
      "loss": 1.0707,
      "step": 700
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6665960784313726,
      "eval_loss": 1.208784580230713,
      "eval_runtime": 6.6546,
      "eval_samples_per_second": 75.136,
      "eval_steps_per_second": 9.467,
      "step": 750
    },
    {
      "epoch": 4.0,
      "eval_exact_match": 23.4,
      "eval_f1": 33.18893657873498,
      "eval_qa_bleu": 3.8183981024520506,
      "eval_qa_exact_match": 0.176,
      "eval_recite_bleu": 14.071857337313705,
      "eval_recite_exact_match": 0.0,
      "step": 750
    },
    {
      "epoch": 4.27,
      "learning_rate": 5e-05,
      "loss": 1.0179,
      "step": 800
    },
    {
      "epoch": 4.8,
      "learning_rate": 5e-05,
      "loss": 1.0066,
      "step": 900
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6651764705882353,
      "eval_loss": 1.2342489957809448,
      "eval_runtime": 6.699,
      "eval_samples_per_second": 74.638,
      "eval_steps_per_second": 9.404,
      "step": 937
    },
    {
      "epoch": 5.0,
      "eval_exact_match": 24.0,
      "eval_f1": 32.88692081962977,
      "eval_qa_bleu": 3.6747877275980905,
      "eval_qa_exact_match": 0.174,
      "eval_recite_bleu": 14.36158051041567,
      "eval_recite_exact_match": 0.006,
      "step": 937
    },
    {
      "epoch": 5.33,
      "learning_rate": 5e-05,
      "loss": 0.9595,
      "step": 1000
    },
    {
      "epoch": 5.87,
      "learning_rate": 5e-05,
      "loss": 0.9353,
      "step": 1100
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6628392156862745,
      "eval_loss": 1.2645816802978516,
      "eval_runtime": 7.4539,
      "eval_samples_per_second": 67.079,
      "eval_steps_per_second": 8.452,
      "step": 1125
    },
    {
      "epoch": 6.0,
      "eval_exact_match": 23.4,
      "eval_f1": 33.50380952380953,
      "eval_qa_bleu": 13.968612067397348,
      "eval_qa_exact_match": 0.168,
      "eval_recite_bleu": 15.026821124315997,
      "eval_recite_exact_match": 0.012,
      "step": 1125
    },
    {
      "epoch": 6.4,
      "learning_rate": 5e-05,
      "loss": 0.8774,
      "step": 1200
    },
    {
      "epoch": 6.93,
      "learning_rate": 5e-05,
      "loss": 0.8629,
      "step": 1300
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6617333333333333,
      "eval_loss": 1.2990769147872925,
      "eval_runtime": 6.6963,
      "eval_samples_per_second": 74.668,
      "eval_steps_per_second": 9.408,
      "step": 1312
    },
    {
      "epoch": 7.0,
      "eval_exact_match": 22.0,
      "eval_f1": 31.8904761904762,
      "eval_qa_bleu": 15.632357578213835,
      "eval_qa_exact_match": 0.166,
      "eval_recite_bleu": 14.318212267740977,
      "eval_recite_exact_match": 0.016,
      "step": 1312
    },
    {
      "epoch": 7.47,
      "learning_rate": 5e-05,
      "loss": 0.8,
      "step": 1400
    },
    {
      "epoch": 8.0,
      "learning_rate": 5e-05,
      "loss": 0.7933,
      "step": 1500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.659607843137255,
      "eval_loss": 1.3466852903366089,
      "eval_runtime": 6.6563,
      "eval_samples_per_second": 75.117,
      "eval_steps_per_second": 9.465,
      "step": 1500
    },
    {
      "epoch": 8.0,
      "eval_exact_match": 22.6,
      "eval_f1": 31.16493506493508,
      "eval_qa_bleu": 21.45561748665761,
      "eval_qa_exact_match": 0.17,
      "eval_recite_bleu": 14.23612921374107,
      "eval_recite_exact_match": 0.012,
      "step": 1500
    },
    {
      "epoch": 8.53,
      "learning_rate": 5e-05,
      "loss": 0.7185,
      "step": 1600
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.6574980392156863,
      "eval_loss": 1.391683578491211,
      "eval_runtime": 7.7056,
      "eval_samples_per_second": 64.888,
      "eval_steps_per_second": 8.176,
      "step": 1687
    },
    {
      "epoch": 9.0,
      "eval_exact_match": 21.8,
      "eval_f1": 31.150317460317464,
      "eval_qa_bleu": 18.411626756160196,
      "eval_qa_exact_match": 0.17,
      "eval_recite_bleu": 15.144735177405446,
      "eval_recite_exact_match": 0.014,
      "step": 1687
    },
    {
      "epoch": 9.07,
      "learning_rate": 5e-05,
      "loss": 0.7176,
      "step": 1700
    },
    {
      "epoch": 9.6,
      "learning_rate": 5e-05,
      "loss": 0.6489,
      "step": 1800
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6549647058823529,
      "eval_loss": 1.4609253406524658,
      "eval_runtime": 6.6599,
      "eval_samples_per_second": 75.076,
      "eval_steps_per_second": 9.46,
      "step": 1875
    },
    {
      "epoch": 10.0,
      "eval_exact_match": 24.8,
      "eval_f1": 32.80031746031747,
      "eval_qa_bleu": 19.203751437503612,
      "eval_qa_exact_match": 0.204,
      "eval_recite_bleu": 14.497298759377461,
      "eval_recite_exact_match": 0.016,
      "step": 1875
    },
    {
      "epoch": 10.13,
      "learning_rate": 5e-05,
      "loss": 0.6426,
      "step": 1900
    },
    {
      "epoch": 10.67,
      "learning_rate": 5e-05,
      "loss": 0.586,
      "step": 2000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.6531372549019608,
      "eval_loss": 1.5205447673797607,
      "eval_runtime": 6.6744,
      "eval_samples_per_second": 74.913,
      "eval_steps_per_second": 9.439,
      "step": 2062
    },
    {
      "epoch": 11.0,
      "eval_exact_match": 24.4,
      "eval_f1": 32.14761904761905,
      "eval_qa_bleu": 17.66314588893528,
      "eval_qa_exact_match": 0.198,
      "eval_recite_bleu": 14.285546168231928,
      "eval_recite_exact_match": 0.016,
      "step": 2062
    },
    {
      "epoch": 11.2,
      "learning_rate": 5e-05,
      "loss": 0.5625,
      "step": 2100
    },
    {
      "epoch": 11.73,
      "learning_rate": 5e-05,
      "loss": 0.5267,
      "step": 2200
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.6518117647058823,
      "eval_loss": 1.5615981817245483,
      "eval_runtime": 7.0623,
      "eval_samples_per_second": 70.799,
      "eval_steps_per_second": 8.921,
      "step": 2250
    },
    {
      "epoch": 12.0,
      "eval_exact_match": 22.0,
      "eval_f1": 29.764505494505517,
      "eval_qa_bleu": 14.595863052742681,
      "eval_qa_exact_match": 0.174,
      "eval_recite_bleu": 15.269859538584459,
      "eval_recite_exact_match": 0.02,
      "step": 2250
    },
    {
      "epoch": 12.27,
      "learning_rate": 5e-05,
      "loss": 0.5074,
      "step": 2300
    },
    {
      "epoch": 12.8,
      "learning_rate": 5e-05,
      "loss": 0.4702,
      "step": 2400
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.6498588235294117,
      "eval_loss": 1.6518086194992065,
      "eval_runtime": 7.4952,
      "eval_samples_per_second": 66.71,
      "eval_steps_per_second": 8.405,
      "step": 2437
    },
    {
      "epoch": 13.0,
      "eval_exact_match": 19.8,
      "eval_f1": 29.199523809523825,
      "eval_qa_bleu": 15.26272043877601,
      "eval_qa_exact_match": 0.156,
      "eval_recite_bleu": 13.712487638236569,
      "eval_recite_exact_match": 0.012,
      "step": 2437
    },
    {
      "epoch": 13.33,
      "learning_rate": 5e-05,
      "loss": 0.4335,
      "step": 2500
    },
    {
      "epoch": 13.87,
      "learning_rate": 5e-05,
      "loss": 0.42,
      "step": 2600
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.6481411764705882,
      "eval_loss": 1.7193351984024048,
      "eval_runtime": 7.6621,
      "eval_samples_per_second": 65.256,
      "eval_steps_per_second": 8.222,
      "step": 2625
    },
    {
      "epoch": 14.0,
      "eval_exact_match": 18.4,
      "eval_f1": 27.123376623376636,
      "eval_qa_bleu": 13.906554273237205,
      "eval_qa_exact_match": 0.15,
      "eval_recite_bleu": 14.177162892556794,
      "eval_recite_exact_match": 0.016,
      "step": 2625
    },
    {
      "epoch": 14.4,
      "learning_rate": 5e-05,
      "loss": 0.3784,
      "step": 2700
    },
    {
      "epoch": 14.93,
      "learning_rate": 5e-05,
      "loss": 0.3811,
      "step": 2800
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.6458509803921568,
      "eval_loss": 1.8015950918197632,
      "eval_runtime": 7.7113,
      "eval_samples_per_second": 64.84,
      "eval_steps_per_second": 8.17,
      "step": 2812
    },
    {
      "epoch": 15.0,
      "eval_exact_match": 19.0,
      "eval_f1": 28.891111111111115,
      "eval_qa_bleu": 16.83679026235492,
      "eval_qa_exact_match": 0.154,
      "eval_recite_bleu": 13.04215589870331,
      "eval_recite_exact_match": 0.012,
      "step": 2812
    },
    {
      "epoch": 15.47,
      "learning_rate": 5e-05,
      "loss": 0.3287,
      "step": 2900
    },
    {
      "epoch": 16.0,
      "learning_rate": 5e-05,
      "loss": 0.3373,
      "step": 3000
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.6464627450980392,
      "eval_loss": 1.8555551767349243,
      "eval_runtime": 6.6548,
      "eval_samples_per_second": 75.133,
      "eval_steps_per_second": 9.467,
      "step": 3000
    },
    {
      "epoch": 16.0,
      "eval_exact_match": 21.0,
      "eval_f1": 30.546060606060603,
      "eval_qa_bleu": 15.51818073374785,
      "eval_qa_exact_match": 0.178,
      "eval_recite_bleu": 14.611971996497491,
      "eval_recite_exact_match": 0.016,
      "step": 3000
    },
    {
      "epoch": 16.53,
      "learning_rate": 5e-05,
      "loss": 0.284,
      "step": 3100
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.644313725490196,
      "eval_loss": 1.9406696557998657,
      "eval_runtime": 6.7553,
      "eval_samples_per_second": 74.016,
      "eval_steps_per_second": 9.326,
      "step": 3187
    },
    {
      "epoch": 17.0,
      "eval_exact_match": 19.2,
      "eval_f1": 26.95666666666667,
      "eval_qa_bleu": 13.067462248071651,
      "eval_qa_exact_match": 0.148,
      "eval_recite_bleu": 13.516002177466241,
      "eval_recite_exact_match": 0.016,
      "step": 3187
    },
    {
      "epoch": 17.07,
      "learning_rate": 5e-05,
      "loss": 0.2933,
      "step": 3200
    },
    {
      "epoch": 17.6,
      "learning_rate": 5e-05,
      "loss": 0.25,
      "step": 3300
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.6435764705882353,
      "eval_loss": 1.9759635925292969,
      "eval_runtime": 7.1655,
      "eval_samples_per_second": 69.779,
      "eval_steps_per_second": 8.792,
      "step": 3375
    },
    {
      "epoch": 18.0,
      "eval_exact_match": 18.6,
      "eval_f1": 27.20079365079366,
      "eval_qa_bleu": 12.454425793037345,
      "eval_qa_exact_match": 0.158,
      "eval_recite_bleu": 13.86644732825742,
      "eval_recite_exact_match": 0.014,
      "step": 3375
    },
    {
      "epoch": 18.13,
      "learning_rate": 5e-05,
      "loss": 0.2587,
      "step": 3400
    },
    {
      "epoch": 18.67,
      "learning_rate": 5e-05,
      "loss": 0.2274,
      "step": 3500
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.6424313725490196,
      "eval_loss": 2.100283145904541,
      "eval_runtime": 7.4607,
      "eval_samples_per_second": 67.018,
      "eval_steps_per_second": 8.444,
      "step": 3562
    },
    {
      "epoch": 19.0,
      "eval_exact_match": 18.8,
      "eval_f1": 26.78857142857144,
      "eval_qa_bleu": 6.755652921468526,
      "eval_qa_exact_match": 0.16,
      "eval_recite_bleu": 13.96083887839662,
      "eval_recite_exact_match": 0.018,
      "step": 3562
    }
  ],
  "logging_steps": 100,
  "max_steps": 9350,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 8.256445032672788e+17,
  "trial_name": null,
  "trial_params": null
}