{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 38.2043935052531,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.76,
      "learning_rate": 0.00016,
      "loss": 9.8321,
      "step": 100
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00032,
      "loss": 4.0687,
      "step": 200
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00048,
      "loss": 2.9707,
      "step": 300
    },
    {
      "epoch": 3.06,
      "learning_rate": 0.00064,
      "loss": 2.5203,
      "step": 400
    },
    {
      "epoch": 3.82,
      "learning_rate": 0.0008,
      "loss": 2.2282,
      "step": 500
    },
    {
      "epoch": 4.58,
      "learning_rate": 0.0007822222222222222,
      "loss": 2.0162,
      "step": 600
    },
    {
      "epoch": 5.35,
      "learning_rate": 0.0007644444444444445,
      "loss": 1.8634,
      "step": 700
    },
    {
      "epoch": 6.11,
      "learning_rate": 0.0007466666666666667,
      "loss": 1.7508,
      "step": 800
    },
    {
      "epoch": 6.88,
      "learning_rate": 0.0007288888888888889,
      "loss": 1.6579,
      "step": 900
    },
    {
      "epoch": 7.64,
      "learning_rate": 0.0007111111111111111,
      "loss": 1.5774,
      "step": 1000
    },
    {
      "epoch": 7.64,
      "eval_bleu": 3.3312,
      "eval_gen_len": 18.8762,
      "eval_loss": 1.546088695526123,
      "eval_runtime": 341.773,
      "eval_samples_per_second": 223.804,
      "eval_steps_per_second": 1.75,
      "step": 1000
    },
    {
      "epoch": 8.4,
      "learning_rate": 0.0006933333333333333,
      "loss": 1.5111,
      "step": 1100
    },
    {
      "epoch": 9.17,
      "learning_rate": 0.0006755555555555555,
      "loss": 1.4553,
      "step": 1200
    },
    {
      "epoch": 9.93,
      "learning_rate": 0.0006577777777777777,
      "loss": 1.4026,
      "step": 1300
    },
    {
      "epoch": 10.7,
      "learning_rate": 0.00064,
      "loss": 1.3509,
      "step": 1400
    },
    {
      "epoch": 11.46,
      "learning_rate": 0.0006222222222222223,
      "loss": 1.3105,
      "step": 1500
    },
    {
      "epoch": 12.23,
      "learning_rate": 0.0006044444444444445,
      "loss": 1.2714,
      "step": 1600
    },
    {
      "epoch": 12.99,
      "learning_rate": 0.0005866666666666667,
      "loss": 1.2358,
      "step": 1700
    },
    {
      "epoch": 13.75,
      "learning_rate": 0.000568888888888889,
      "loss": 1.1962,
      "step": 1800
    },
    {
      "epoch": 14.52,
      "learning_rate": 0.0005511111111111112,
      "loss": 1.1651,
      "step": 1900
    },
    {
      "epoch": 15.28,
      "learning_rate": 0.0005333333333333334,
      "loss": 1.137,
      "step": 2000
    },
    {
      "epoch": 15.28,
      "eval_bleu": 3.8148,
      "eval_gen_len": 18.8755,
      "eval_loss": 1.4426332712173462,
      "eval_runtime": 303.4706,
      "eval_samples_per_second": 252.051,
      "eval_steps_per_second": 1.971,
      "step": 2000
    },
    {
      "epoch": 16.05,
      "learning_rate": 0.0005155555555555557,
      "loss": 1.1102,
      "step": 2100
    },
    {
      "epoch": 16.81,
      "learning_rate": 0.0004977777777777778,
      "loss": 1.0793,
      "step": 2200
    },
    {
      "epoch": 17.57,
      "learning_rate": 0.00048,
      "loss": 1.053,
      "step": 2300
    },
    {
      "epoch": 18.34,
      "learning_rate": 0.0004622222222222222,
      "loss": 1.0295,
      "step": 2400
    },
    {
      "epoch": 19.1,
      "learning_rate": 0.00044444444444444447,
      "loss": 1.01,
      "step": 2500
    },
    {
      "epoch": 19.87,
      "learning_rate": 0.00042666666666666667,
      "loss": 0.986,
      "step": 2600
    },
    {
      "epoch": 20.63,
      "learning_rate": 0.00040888888888888887,
      "loss": 0.9645,
      "step": 2700
    },
    {
      "epoch": 21.39,
      "learning_rate": 0.0003911111111111111,
      "loss": 0.9447,
      "step": 2800
    },
    {
      "epoch": 22.16,
      "learning_rate": 0.0003733333333333334,
      "loss": 0.9289,
      "step": 2900
    },
    {
      "epoch": 22.92,
      "learning_rate": 0.00035555555555555557,
      "loss": 0.9109,
      "step": 3000
    },
    {
      "epoch": 22.92,
      "eval_bleu": 3.9571,
      "eval_gen_len": 18.8752,
      "eval_loss": 1.475421667098999,
      "eval_runtime": 306.5307,
      "eval_samples_per_second": 249.535,
      "eval_steps_per_second": 1.951,
      "step": 3000
    },
    {
      "epoch": 23.69,
      "learning_rate": 0.00033777777777777777,
      "loss": 0.891,
      "step": 3100
    },
    {
      "epoch": 24.45,
      "learning_rate": 0.00032,
      "loss": 0.8772,
      "step": 3200
    },
    {
      "epoch": 25.21,
      "learning_rate": 0.0003022222222222222,
      "loss": 0.8642,
      "step": 3300
    },
    {
      "epoch": 25.98,
      "learning_rate": 0.0002844444444444445,
      "loss": 0.8509,
      "step": 3400
    },
    {
      "epoch": 26.74,
      "learning_rate": 0.0002666666666666667,
      "loss": 0.833,
      "step": 3500
    },
    {
      "epoch": 27.51,
      "learning_rate": 0.0002488888888888889,
      "loss": 0.8229,
      "step": 3600
    },
    {
      "epoch": 28.27,
      "learning_rate": 0.0002311111111111111,
      "loss": 0.8119,
      "step": 3700
    },
    {
      "epoch": 29.04,
      "learning_rate": 0.00021333333333333333,
      "loss": 0.8025,
      "step": 3800
    },
    {
      "epoch": 29.8,
      "learning_rate": 0.00019555555555555556,
      "loss": 0.7901,
      "step": 3900
    },
    {
      "epoch": 30.56,
      "learning_rate": 0.00017777777777777779,
      "loss": 0.7807,
      "step": 4000
    },
    {
      "epoch": 30.56,
      "eval_bleu": 3.9767,
      "eval_gen_len": 18.8761,
      "eval_loss": 1.5373053550720215,
      "eval_runtime": 305.2497,
      "eval_samples_per_second": 250.582,
      "eval_steps_per_second": 1.959,
      "step": 4000
    },
    {
      "epoch": 31.33,
      "learning_rate": 0.00016,
      "loss": 0.773,
      "step": 4100
    },
    {
      "epoch": 32.09,
      "learning_rate": 0.00014222222222222224,
      "loss": 0.7664,
      "step": 4200
    },
    {
      "epoch": 32.86,
      "learning_rate": 0.00012444444444444444,
      "loss": 0.7577,
      "step": 4300
    },
    {
      "epoch": 33.62,
      "learning_rate": 0.00010666666666666667,
      "loss": 0.7505,
      "step": 4400
    },
    {
      "epoch": 34.38,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.7456,
      "step": 4500
    },
    {
      "epoch": 35.15,
      "learning_rate": 7.111111111111112e-05,
      "loss": 0.7414,
      "step": 4600
    },
    {
      "epoch": 35.91,
      "learning_rate": 5.333333333333333e-05,
      "loss": 0.7363,
      "step": 4700
    },
    {
      "epoch": 36.68,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.7324,
      "step": 4800
    },
    {
      "epoch": 37.44,
      "learning_rate": 1.777777777777778e-05,
      "loss": 0.7301,
      "step": 4900
    },
    {
      "epoch": 38.2,
      "learning_rate": 0.0,
      "loss": 0.7288,
      "step": 5000
    },
    {
      "epoch": 38.2,
      "eval_bleu": 3.9838,
      "eval_gen_len": 18.8778,
      "eval_loss": 1.5656607151031494,
      "eval_runtime": 304.6836,
      "eval_samples_per_second": 251.047,
      "eval_steps_per_second": 1.963,
      "step": 5000
    },
    {
      "epoch": 38.2,
      "step": 5000,
      "total_flos": 6.138296014525694e+18,
      "train_loss": 1.3865033660888673,
      "train_runtime": 14019.5814,
      "train_samples_per_second": 1460.814,
      "train_steps_per_second": 0.357
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 39,
  "total_flos": 6.138296014525694e+18,
  "trial_name": null,
  "trial_params": null
}