{
  "best_metric": 26.3346,
  "best_model_checkpoint": "/local1/hfs/gs_stuff/ft-wmt14/checkpoint-100000",
  "epoch": 0.354862233609357,
  "eval_steps": 10000,
  "global_step": 100000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01774311168046785,
      "grad_norm": 1.5128649473190308,
      "learning_rate": 0.000475,
      "loss": 2.4524,
      "step": 5000
    },
    {
      "epoch": 0.0354862233609357,
      "grad_norm": 1.242777705192566,
      "learning_rate": 0.00045000000000000004,
      "loss": 2.3103,
      "step": 10000
    },
    {
      "epoch": 0.0354862233609357,
      "eval_bleu": 22.058,
      "eval_gen_len": 27.7263,
      "eval_loss": 1.8454290628433228,
      "eval_runtime": 238.9627,
      "eval_samples_per_second": 12.554,
      "eval_steps_per_second": 1.569,
      "step": 10000
    },
    {
      "epoch": 0.05322933504140355,
      "grad_norm": 0.988516628742218,
      "learning_rate": 0.000425,
      "loss": 2.2594,
      "step": 15000
    },
    {
      "epoch": 0.0709724467218714,
      "grad_norm": 1.4387503862380981,
      "learning_rate": 0.0004,
      "loss": 2.2141,
      "step": 20000
    },
    {
      "epoch": 0.0709724467218714,
      "eval_bleu": 23.339,
      "eval_gen_len": 26.7147,
      "eval_loss": 1.7811188697814941,
      "eval_runtime": 225.1917,
      "eval_samples_per_second": 13.322,
      "eval_steps_per_second": 1.665,
      "step": 20000
    },
    {
      "epoch": 0.08871555840233925,
      "grad_norm": 1.2291666269302368,
      "learning_rate": 0.000375,
      "loss": 2.1898,
      "step": 25000
    },
    {
      "epoch": 0.1064586700828071,
      "grad_norm": 1.1732761859893799,
      "learning_rate": 0.00035,
      "loss": 2.176,
      "step": 30000
    },
    {
      "epoch": 0.1064586700828071,
      "eval_bleu": 24.3234,
      "eval_gen_len": 27.125,
      "eval_loss": 1.7360602617263794,
      "eval_runtime": 227.357,
      "eval_samples_per_second": 13.195,
      "eval_steps_per_second": 1.649,
      "step": 30000
    },
    {
      "epoch": 0.12420178176327495,
      "grad_norm": 1.187321662902832,
      "learning_rate": 0.00032500000000000004,
      "loss": 2.1468,
      "step": 35000
    },
    {
      "epoch": 0.1419448934437428,
      "grad_norm": 1.3599053621292114,
      "learning_rate": 0.0003,
      "loss": 2.139,
      "step": 40000
    },
    {
      "epoch": 0.1419448934437428,
      "eval_bleu": 25.0888,
      "eval_gen_len": 26.8213,
      "eval_loss": 1.7130982875823975,
      "eval_runtime": 221.7983,
      "eval_samples_per_second": 13.526,
      "eval_steps_per_second": 1.691,
      "step": 40000
    },
    {
      "epoch": 0.15968800512421066,
      "grad_norm": 1.4392811059951782,
      "learning_rate": 0.000275,
      "loss": 2.1151,
      "step": 45000
    },
    {
      "epoch": 0.1774311168046785,
      "grad_norm": 1.4162044525146484,
      "learning_rate": 0.00025,
      "loss": 2.1084,
      "step": 50000
    },
    {
      "epoch": 0.1774311168046785,
      "eval_bleu": 24.9992,
      "eval_gen_len": 26.824,
      "eval_loss": 1.687427043914795,
      "eval_runtime": 224.0057,
      "eval_samples_per_second": 13.393,
      "eval_steps_per_second": 1.674,
      "step": 50000
    },
    {
      "epoch": 0.19517422848514634,
      "grad_norm": 1.2046048641204834,
      "learning_rate": 0.00022500000000000002,
      "loss": 2.0914,
      "step": 55000
    },
    {
      "epoch": 0.2129173401656142,
      "grad_norm": 1.2651879787445068,
      "learning_rate": 0.0002,
      "loss": 2.0826,
      "step": 60000
    },
    {
      "epoch": 0.2129173401656142,
      "eval_bleu": 25.7297,
      "eval_gen_len": 26.62,
      "eval_loss": 1.6685482263565063,
      "eval_runtime": 221.6914,
      "eval_samples_per_second": 13.532,
      "eval_steps_per_second": 1.692,
      "step": 60000
    },
    {
      "epoch": 0.23066045184608205,
      "grad_norm": 1.212643027305603,
      "learning_rate": 0.000175,
      "loss": 2.0778,
      "step": 65000
    },
    {
      "epoch": 0.2484035635265499,
      "grad_norm": 1.2400418519973755,
      "learning_rate": 0.00015,
      "loss": 2.068,
      "step": 70000
    },
    {
      "epoch": 0.2484035635265499,
      "eval_bleu": 25.9031,
      "eval_gen_len": 26.685,
      "eval_loss": 1.648539662361145,
      "eval_runtime": 223.211,
      "eval_samples_per_second": 13.44,
      "eval_steps_per_second": 1.68,
      "step": 70000
    },
    {
      "epoch": 0.26614667520701774,
      "grad_norm": 1.3389995098114014,
      "learning_rate": 0.000125,
      "loss": 2.0566,
      "step": 75000
    },
    {
      "epoch": 0.2838897868874856,
      "grad_norm": 1.1512677669525146,
      "learning_rate": 0.0001,
      "loss": 2.05,
      "step": 80000
    },
    {
      "epoch": 0.2838897868874856,
      "eval_bleu": 26.143,
      "eval_gen_len": 26.8693,
      "eval_loss": 1.6370748281478882,
      "eval_runtime": 225.2245,
      "eval_samples_per_second": 13.32,
      "eval_steps_per_second": 1.665,
      "step": 80000
    },
    {
      "epoch": 0.30163289856795344,
      "grad_norm": 1.1607016324996948,
      "learning_rate": 7.5e-05,
      "loss": 2.0235,
      "step": 85000
    },
    {
      "epoch": 0.3193760102484213,
      "grad_norm": 1.2967106103897095,
      "learning_rate": 5e-05,
      "loss": 2.0331,
      "step": 90000
    },
    {
      "epoch": 0.3193760102484213,
      "eval_bleu": 26.3038,
      "eval_gen_len": 26.5183,
      "eval_loss": 1.6311123371124268,
      "eval_runtime": 219.2546,
      "eval_samples_per_second": 13.683,
      "eval_steps_per_second": 1.71,
      "step": 90000
    },
    {
      "epoch": 0.33711912192888915,
      "grad_norm": 1.2956724166870117,
      "learning_rate": 2.5e-05,
      "loss": 2.0346,
      "step": 95000
    },
    {
      "epoch": 0.354862233609357,
      "grad_norm": 1.1822398900985718,
      "learning_rate": 0.0,
      "loss": 2.0273,
      "step": 100000
    },
    {
      "epoch": 0.354862233609357,
      "eval_bleu": 26.3346,
      "eval_gen_len": 26.6907,
      "eval_loss": 1.6253596544265747,
      "eval_runtime": 221.5579,
      "eval_samples_per_second": 13.54,
      "eval_steps_per_second": 1.693,
      "step": 100000
    },
    {
      "epoch": 0.354862233609357,
      "step": 100000,
      "total_flos": 3.589813132276531e+16,
      "train_loss": 2.132816611328125,
      "train_runtime": 15981.9131,
      "train_samples_per_second": 100.113,
      "train_steps_per_second": 6.257
    }
  ],
  "logging_steps": 5000,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10000,
  "total_flos": 3.589813132276531e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}