|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.6955768564499367,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 0.00019993414517786287,
      "loss": 0.4211,
      "step": 50
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001997366674486034,
      "loss": 0.2148,
      "step": 100
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00019940782690943637,
      "loss": 0.1821,
      "step": 150
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00019894805667506615,
      "loss": 0.1588,
      "step": 200
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00019835796230723287,
      "loss": 0.142,
      "step": 250
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00019763832101712928,
      "loss": 0.1332,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00019679008064173867,
      "loss": 0.1303,
      "step": 350
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00019581435839544203,
      "loss": 0.126,
      "step": 400
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00019471243939853908,
      "loss": 0.1089,
      "step": 450
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0001934857749846208,
      "loss": 0.1077,
      "step": 500
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001921359807890232,
      "loss": 0.0967,
      "step": 550
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.0001906648346208798,
      "loss": 0.0829,
      "step": 600
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00018907427412157533,
      "loss": 0.0772,
      "step": 650
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00018736639421268545,
      "loss": 0.0744,
      "step": 700
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.0001855434443367628,
      "loss": 0.0699,
      "step": 750
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.0001836078254946042,
      "loss": 0.0742,
      "step": 800
    },
    {
      "epoch": 1.57,
      "learning_rate": 0.00018156208708290121,
      "loss": 0.0679,
      "step": 850
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00017940892353643866,
      "loss": 0.0655,
      "step": 900
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00017715117077926422,
      "loss": 0.0695,
      "step": 950
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00017479180248950295,
      "loss": 0.0619,
      "step": 1000
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00017233392618273645,
      "loss": 0.0638,
      "step": 1050
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.00016978077911910502,
      "loss": 0.053,
      "step": 1100
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.00016713572403952403,
      "loss": 0.0403,
      "step": 1150
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.0001644022447366296,
      "loss": 0.0474,
      "step": 1200
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.0001615839414662879,
      "loss": 0.0402,
      "step": 1250
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00015868452620571087,
      "loss": 0.0456,
      "step": 1300
    },
    {
      "epoch": 2.49,
      "learning_rate": 0.00015570781776442426,
      "loss": 0.0426,
      "step": 1350
    },
    {
      "epoch": 2.59,
      "learning_rate": 0.00015265773675452718,
      "loss": 0.0416,
      "step": 1400
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.0001495383004268678,
      "loss": 0.0422,
      "step": 1450
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.00014635361737993667,
      "loss": 0.0395,
      "step": 1500
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.00014310788214844618,
      "loss": 0.0364,
      "step": 1550
    },
    {
      "epoch": 2.96,
      "learning_rate": 0.00013980536967872378,
      "loss": 0.0353,
      "step": 1600
    },
    {
      "epoch": 3.05,
      "learning_rate": 0.00013645042969819544,
      "loss": 0.0285,
      "step": 1650
    },
    {
      "epoch": 3.14,
      "learning_rate": 0.0001330474809863752,
      "loss": 0.0234,
      "step": 1700
    },
    {
      "epoch": 3.23,
      "learning_rate": 0.00012960100555490617,
      "loss": 0.0234,
      "step": 1750
    },
    {
      "epoch": 3.33,
      "learning_rate": 0.0001261155427443192,
      "loss": 0.0243,
      "step": 1800
    },
    {
      "epoch": 3.42,
      "learning_rate": 0.00012259568324528335,
      "loss": 0.0251,
      "step": 1850
    },
    {
      "epoch": 3.51,
      "learning_rate": 0.00011904606305222381,
      "loss": 0.0234,
      "step": 1900
    },
    {
      "epoch": 3.6,
      "learning_rate": 0.00011547135735726992,
      "loss": 0.0216,
      "step": 1950
    },
    {
      "epoch": 3.7,
      "learning_rate": 0.00011187627439257638,
      "loss": 0.0195,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 4328,
  "num_train_epochs": 8,
  "save_steps": 500,
  "total_flos": 8.384347351782605e+17,
  "trial_name": null,
  "trial_params": null
}
|
|