{
  "best_metric": 0.05250174552202225,
  "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/prajjwal1/bert-small-codesearchnet-python/checkpoint-4125",
  "epoch": 13.0,
  "global_step": 4875,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_avg_length": 10.684,
      "eval_bleu": 0.0,
      "eval_loss": 1.2151237726211548,
      "eval_rouge1": 0.0928,
      "eval_rouge2": 0.0083,
      "eval_runtime": 138.1107,
      "eval_samples_per_second": 36.203,
      "eval_steps_per_second": 4.525,
      "step": 375
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.354011121497024e-05,
      "loss": 1.9359,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_avg_length": 15.0624,
      "eval_bleu": 0.0032,
      "eval_loss": 1.0291130542755127,
      "eval_rouge1": 0.1752,
      "eval_rouge2": 0.0338,
      "eval_runtime": 131.019,
      "eval_samples_per_second": 38.162,
      "eval_steps_per_second": 4.77,
      "step": 750
    },
    {
      "epoch": 2.67,
      "learning_rate": 4.7082288801902905e-05,
      "loss": 0.9422,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_avg_length": 17.9358,
      "eval_bleu": 0.0061,
      "eval_loss": 0.9172993898391724,
      "eval_rouge1": 0.2506,
      "eval_rouge2": 0.0711,
      "eval_runtime": 129.41,
      "eval_samples_per_second": 38.637,
      "eval_steps_per_second": 4.83,
      "step": 1125
    },
    {
      "epoch": 4.0,
      "learning_rate": 7.063511293381453e-05,
      "loss": 0.776,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_avg_length": 18.3724,
      "eval_bleu": 0.0088,
      "eval_loss": 0.8057555556297302,
      "eval_rouge1": 0.3321,
      "eval_rouge2": 0.1409,
      "eval_runtime": 125.5689,
      "eval_samples_per_second": 39.819,
      "eval_steps_per_second": 4.977,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_avg_length": 18.564,
      "eval_bleu": 0.0123,
      "eval_loss": 0.6914781928062439,
      "eval_rouge1": 0.4044,
      "eval_rouge2": 0.2267,
      "eval_runtime": 125.7562,
      "eval_samples_per_second": 39.759,
      "eval_steps_per_second": 4.97,
      "step": 1875
    },
    {
      "epoch": 5.33,
      "learning_rate": 9.420605056220666e-05,
      "loss": 0.6218,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_avg_length": 17.5586,
      "eval_bleu": 0.0193,
      "eval_loss": 0.5281431674957275,
      "eval_rouge1": 0.5382,
      "eval_rouge2": 0.4097,
      "eval_runtime": 124.9018,
      "eval_samples_per_second": 40.031,
      "eval_steps_per_second": 5.004,
      "step": 2250
    },
    {
      "epoch": 6.67,
      "learning_rate": 0.00011781098874052987,
      "loss": 0.4363,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_avg_length": 17.8768,
      "eval_bleu": 0.0333,
      "eval_loss": 0.18967217206954956,
      "eval_rouge1": 0.6311,
      "eval_rouge2": 0.6002,
      "eval_runtime": 127.7329,
      "eval_samples_per_second": 39.144,
      "eval_steps_per_second": 4.893,
      "step": 2625
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.0001414699072483927,
      "loss": 0.1518,
      "step": 3000
    },
    {
      "epoch": 8.0,
      "eval_avg_length": 17.879,
      "eval_bleu": 0.0346,
      "eval_loss": 0.08337126672267914,
      "eval_rouge1": 0.6413,
      "eval_rouge2": 0.621,
      "eval_runtime": 127.6119,
      "eval_samples_per_second": 39.181,
      "eval_steps_per_second": 4.898,
      "step": 3000
    },
    {
      "epoch": 9.0,
      "eval_avg_length": 17.8886,
      "eval_bleu": 0.0349,
      "eval_loss": 0.058715466409921646,
      "eval_rouge1": 0.6439,
      "eval_rouge2": 0.6268,
      "eval_runtime": 128.3455,
      "eval_samples_per_second": 38.957,
      "eval_steps_per_second": 4.87,
      "step": 3375
    },
    {
      "epoch": 9.33,
      "learning_rate": 0.000165146600920707,
      "loss": 0.0579,
      "step": 3500
    },
    {
      "epoch": 10.0,
      "eval_avg_length": 17.885,
      "eval_bleu": 0.0348,
      "eval_loss": 0.054685767740011215,
      "eval_rouge1": 0.6443,
      "eval_rouge2": 0.6276,
      "eval_runtime": 125.2586,
      "eval_samples_per_second": 39.917,
      "eval_steps_per_second": 4.99,
      "step": 3750
    },
    {
      "epoch": 10.67,
      "learning_rate": 0.00018887515761889517,
      "loss": 0.0437,
      "step": 4000
    },
    {
      "epoch": 11.0,
      "eval_avg_length": 17.8766,
      "eval_bleu": 0.0348,
      "eval_loss": 0.05250174552202225,
      "eval_rouge1": 0.6442,
      "eval_rouge2": 0.6278,
      "eval_runtime": 125.5302,
      "eval_samples_per_second": 39.831,
      "eval_steps_per_second": 4.979,
      "step": 4125
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.0002126803301507607,
      "loss": 0.0365,
      "step": 4500
    },
    {
      "epoch": 12.0,
      "eval_avg_length": 17.8876,
      "eval_bleu": 0.0347,
      "eval_loss": 0.05503207445144653,
      "eval_rouge1": 0.6436,
      "eval_rouge2": 0.6266,
      "eval_runtime": 126.9255,
      "eval_samples_per_second": 39.393,
      "eval_steps_per_second": 4.924,
      "step": 4500
    },
    {
      "epoch": 13.0,
      "eval_avg_length": 17.876,
      "eval_bleu": 0.0347,
      "eval_loss": 0.05446252599358559,
      "eval_rouge1": 0.6439,
      "eval_rouge2": 0.627,
      "eval_runtime": 126.1815,
      "eval_samples_per_second": 39.625,
      "eval_steps_per_second": 4.953,
      "step": 4875
    }
  ],
  "max_steps": 5625,
  "num_train_epochs": 15,
  "total_flos": 1.79619568128e+16,
  "trial_name": null,
  "trial_params": null
}