{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 14555,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "learning_rate": 4.828237718996909e-05,
      "loss": 0.1728,
      "step": 500
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.6564754379938166e-05,
      "loss": 0.0268,
      "step": 1000
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.484713156990725e-05,
      "loss": 0.0183,
      "step": 1500
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.3129508759876336e-05,
      "loss": 0.0129,
      "step": 2000
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.1411885949845415e-05,
      "loss": 0.0113,
      "step": 2500
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.96942631398145e-05,
      "loss": 0.0099,
      "step": 3000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.797664032978358e-05,
      "loss": 0.0091,
      "step": 3500
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.6259017519752663e-05,
      "loss": 0.0084,
      "step": 4000
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.454139470972175e-05,
      "loss": 0.0071,
      "step": 4500
    },
    {
      "epoch": 1.72,
      "learning_rate": 3.2823771899690834e-05,
      "loss": 0.0074,
      "step": 5000
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.110614908965991e-05,
      "loss": 0.0063,
      "step": 5500
    },
    {
      "epoch": 2.06,
      "learning_rate": 2.9388526279628997e-05,
      "loss": 0.0061,
      "step": 6000
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.7670903469598076e-05,
      "loss": 0.0055,
      "step": 6500
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.5953280659567158e-05,
      "loss": 0.005,
      "step": 7000
    },
    {
      "epoch": 2.58,
      "learning_rate": 2.4235657849536243e-05,
      "loss": 0.0054,
      "step": 7500
    },
    {
      "epoch": 2.75,
      "learning_rate": 2.2518035039505325e-05,
      "loss": 0.0051,
      "step": 8000
    },
    {
      "epoch": 2.92,
      "learning_rate": 2.080041222947441e-05,
      "loss": 0.0046,
      "step": 8500
    },
    {
      "epoch": 3.09,
      "learning_rate": 1.908278941944349e-05,
      "loss": 0.0045,
      "step": 9000
    },
    {
      "epoch": 3.26,
      "learning_rate": 1.7365166609412573e-05,
      "loss": 0.0041,
      "step": 9500
    },
    {
      "epoch": 3.44,
      "learning_rate": 1.5647543799381655e-05,
      "loss": 0.0038,
      "step": 10000
    },
    {
      "epoch": 3.61,
      "learning_rate": 1.3929920989350739e-05,
      "loss": 0.0038,
      "step": 10500
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.2212298179319822e-05,
      "loss": 0.0041,
      "step": 11000
    },
    {
      "epoch": 3.95,
      "learning_rate": 1.0494675369288904e-05,
      "loss": 0.0037,
      "step": 11500
    },
    {
      "epoch": 4.12,
      "learning_rate": 8.777052559257987e-06,
      "loss": 0.0034,
      "step": 12000
    },
    {
      "epoch": 4.29,
      "learning_rate": 7.05942974922707e-06,
      "loss": 0.0028,
      "step": 12500
    },
    {
      "epoch": 4.47,
      "learning_rate": 5.341806939196153e-06,
      "loss": 0.0033,
      "step": 13000
    },
    {
      "epoch": 4.64,
      "learning_rate": 3.6241841291652353e-06,
      "loss": 0.0033,
      "step": 13500
    },
    {
      "epoch": 4.81,
      "learning_rate": 1.906561319134318e-06,
      "loss": 0.0034,
      "step": 14000
    },
    {
      "epoch": 4.98,
      "learning_rate": 1.889385091034009e-07,
      "loss": 0.0031,
      "step": 14500
    },
    {
      "epoch": 5.0,
      "step": 14555,
      "train_runtime": 883.0149,
      "train_samples_per_second": 16.483
    }
  ],
  "max_steps": 14555,
  "num_train_epochs": 5,
  "total_flos": 17426167707598848,
  "trial_name": null,
  "trial_params": null
}