{
  "best_metric": 0.47151947021484375,
  "best_model_checkpoint": "ckpts/discrim/contrastive_models/gen.t5.large/flan_t5_large_svamp-clean_100K_max_align_5/checkpoint-2000",
  "epoch": 0.5918035212309514,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2.958579881656805e-06,
      "loss": 1.0002,
      "step": 50
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.91715976331361e-06,
      "loss": 1.0001,
      "step": 100
    },
    {
      "epoch": 0.04,
      "learning_rate": 8.875739644970414e-06,
      "loss": 0.9989,
      "step": 150
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.183431952662722e-05,
      "loss": 0.9972,
      "step": 200
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.4792899408284025e-05,
      "loss": 0.995,
      "step": 250
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.7751479289940828e-05,
      "loss": 0.9897,
      "step": 300
    },
    {
      "epoch": 0.1,
      "learning_rate": 2.071005917159763e-05,
      "loss": 0.9809,
      "step": 350
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.366863905325444e-05,
      "loss": 0.8868,
      "step": 400
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.6627218934911244e-05,
      "loss": 0.7408,
      "step": 450
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.958579881656805e-05,
      "loss": 0.674,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 3.254437869822485e-05,
      "loss": 0.6155,
      "step": 550
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.5502958579881656e-05,
      "loss": 0.5576,
      "step": 600
    },
    {
      "epoch": 0.19,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.5003,
      "step": 650
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.142011834319526e-05,
      "loss": 0.4514,
      "step": 700
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.4378698224852076e-05,
      "loss": 0.4074,
      "step": 750
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.733727810650888e-05,
      "loss": 0.3495,
      "step": 800
    },
    {
      "epoch": 0.25,
      "learning_rate": 5.029585798816568e-05,
      "loss": 0.3617,
      "step": 850
    },
    {
      "epoch": 0.27,
      "learning_rate": 5.325443786982249e-05,
      "loss": 0.3384,
      "step": 900
    },
    {
      "epoch": 0.28,
      "learning_rate": 5.6213017751479294e-05,
      "loss": 0.2962,
      "step": 950
    },
    {
      "epoch": 0.3,
      "learning_rate": 5.91715976331361e-05,
      "loss": 0.2602,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "eval_loss": 0.6183895468711853,
      "eval_runtime": 115.5435,
      "eval_samples_per_second": 156.513,
      "eval_steps_per_second": 19.568,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 5.9999239256099436e-05,
      "loss": 0.2554,
      "step": 1050
    },
    {
      "epoch": 0.33,
      "learning_rate": 5.9995658680583245e-05,
      "loss": 0.2386,
      "step": 1100
    },
    {
      "epoch": 0.34,
      "learning_rate": 5.9989143572558144e-05,
      "loss": 0.213,
      "step": 1150
    },
    {
      "epoch": 0.36,
      "learning_rate": 5.997969456940961e-05,
      "loss": 0.2021,
      "step": 1200
    },
    {
      "epoch": 0.37,
      "learning_rate": 5.9967312595551613e-05,
      "loss": 0.1926,
      "step": 1250
    },
    {
      "epoch": 0.38,
      "learning_rate": 5.9951998862336304e-05,
      "loss": 0.1712,
      "step": 1300
    },
    {
      "epoch": 0.4,
      "learning_rate": 5.9933754867935396e-05,
      "loss": 0.1654,
      "step": 1350
    },
    {
      "epoch": 0.41,
      "learning_rate": 5.991258239719367e-05,
      "loss": 0.1424,
      "step": 1400
    },
    {
      "epoch": 0.43,
      "learning_rate": 5.9888483521454334e-05,
      "loss": 0.1452,
      "step": 1450
    },
    {
      "epoch": 0.44,
      "learning_rate": 5.986146059835637e-05,
      "loss": 0.1485,
      "step": 1500
    },
    {
      "epoch": 0.46,
      "learning_rate": 5.983151627160388e-05,
      "loss": 0.1182,
      "step": 1550
    },
    {
      "epoch": 0.47,
      "learning_rate": 5.9798653470707503e-05,
      "loss": 0.1215,
      "step": 1600
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.976287541069771e-05,
      "loss": 0.1183,
      "step": 1650
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.972418559181037e-05,
      "loss": 0.0983,
      "step": 1700
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.968258779914428e-05,
      "loss": 0.1155,
      "step": 1750
    },
    {
      "epoch": 0.53,
      "learning_rate": 5.9638086102290845e-05,
      "loss": 0.0874,
      "step": 1800
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.9590684854935956e-05,
      "loss": 0.0993,
      "step": 1850
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.9540388694434064e-05,
      "loss": 0.088,
      "step": 1900
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.948720254135451e-05,
      "loss": 0.0849,
      "step": 1950
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.943113159900009e-05,
      "loss": 0.0864,
      "step": 2000
    },
    {
      "epoch": 0.59,
      "eval_loss": 0.47151947021484375,
      "eval_runtime": 115.9067,
      "eval_samples_per_second": 156.022,
      "eval_steps_per_second": 19.507,
      "step": 2000
    }
  ],
  "max_steps": 16895,
  "num_train_epochs": 5,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}