{
  "best_metric": 0.5989616200729698,
  "best_model_checkpoint": "models_intermediate/intermediate_it5-small/checkpoint-50000",
  "epoch": 25.0,
  "eval_steps": 10000,
  "global_step": 50000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 3.9200000000000004e-05,
      "loss": 4.7009,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.8400000000000005e-05,
      "loss": 1.7225,
      "step": 2000
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.76e-05,
      "loss": 0.5391,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.680000000000001e-05,
      "loss": 0.4553,
      "step": 4000
    },
    {
      "epoch": 2.5,
      "learning_rate": 3.6e-05,
      "loss": 0.4145,
      "step": 5000
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.52e-05,
      "loss": 0.39,
      "step": 6000
    },
    {
      "epoch": 3.5,
      "learning_rate": 3.44e-05,
      "loss": 0.3579,
      "step": 7000
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.3600000000000004e-05,
      "loss": 0.3514,
      "step": 8000
    },
    {
      "epoch": 4.5,
      "learning_rate": 3.28e-05,
      "loss": 0.329,
      "step": 9000
    },
    {
      "epoch": 5.0,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.3193,
      "step": 10000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.24682821333408356,
      "eval_p-value": 1.381553198766455e-280,
      "eval_runtime": 76.6844,
      "eval_samples_per_second": 52.162,
      "eval_spearman": 0.5236525970894446,
      "eval_steps_per_second": 6.52,
      "step": 10000
    },
    {
      "epoch": 5.5,
      "learning_rate": 3.1200000000000006e-05,
      "loss": 0.3036,
      "step": 11000
    },
    {
      "epoch": 6.0,
      "learning_rate": 3.0400000000000004e-05,
      "loss": 0.3049,
      "step": 12000
    },
    {
      "epoch": 6.5,
      "learning_rate": 2.96e-05,
      "loss": 0.2874,
      "step": 13000
    },
    {
      "epoch": 7.0,
      "learning_rate": 2.8800000000000002e-05,
      "loss": 0.2789,
      "step": 14000
    },
    {
      "epoch": 7.5,
      "learning_rate": 2.8e-05,
      "loss": 0.2758,
      "step": 15000
    },
    {
      "epoch": 8.0,
      "learning_rate": 2.7200000000000004e-05,
      "loss": 0.2708,
      "step": 16000
    },
    {
      "epoch": 8.5,
      "learning_rate": 2.6400000000000005e-05,
      "loss": 0.2677,
      "step": 17000
    },
    {
      "epoch": 9.0,
      "learning_rate": 2.5600000000000002e-05,
      "loss": 0.2547,
      "step": 18000
    },
    {
      "epoch": 9.5,
      "learning_rate": 2.4800000000000003e-05,
      "loss": 0.2597,
      "step": 19000
    },
    {
      "epoch": 10.0,
      "learning_rate": 2.4e-05,
      "loss": 0.2518,
      "step": 20000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.20981653034687042,
      "eval_p-value": 5.53e-322,
      "eval_runtime": 73.524,
      "eval_samples_per_second": 54.404,
      "eval_spearman": 0.5549641467300505,
      "eval_steps_per_second": 6.8,
      "step": 20000
    },
    {
      "epoch": 10.5,
      "learning_rate": 2.32e-05,
      "loss": 0.2497,
      "step": 21000
    },
    {
      "epoch": 11.0,
      "learning_rate": 2.2400000000000002e-05,
      "loss": 0.2435,
      "step": 22000
    },
    {
      "epoch": 11.5,
      "learning_rate": 2.1600000000000003e-05,
      "loss": 0.2442,
      "step": 23000
    },
    {
      "epoch": 12.0,
      "learning_rate": 2.08e-05,
      "loss": 0.2369,
      "step": 24000
    },
    {
      "epoch": 12.5,
      "learning_rate": 2e-05,
      "loss": 0.2412,
      "step": 25000
    },
    {
      "epoch": 13.0,
      "learning_rate": 1.9200000000000003e-05,
      "loss": 0.2298,
      "step": 26000
    },
    {
      "epoch": 13.5,
      "learning_rate": 1.8400000000000003e-05,
      "loss": 0.2282,
      "step": 27000
    },
    {
      "epoch": 14.0,
      "learning_rate": 1.76e-05,
      "loss": 0.232,
      "step": 28000
    },
    {
      "epoch": 14.5,
      "learning_rate": 1.6800000000000002e-05,
      "loss": 0.2302,
      "step": 29000
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.2264,
      "step": 30000
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.19759084284305573,
      "eval_p-value": 0.0,
      "eval_runtime": 44.4809,
      "eval_samples_per_second": 89.926,
      "eval_spearman": 0.5808866463657348,
      "eval_steps_per_second": 11.241,
      "step": 30000
    },
    {
      "epoch": 15.5,
      "learning_rate": 1.5200000000000002e-05,
      "loss": 0.2223,
      "step": 31000
    },
    {
      "epoch": 16.0,
      "learning_rate": 1.4400000000000001e-05,
      "loss": 0.2232,
      "step": 32000
    },
    {
      "epoch": 16.5,
      "learning_rate": 1.3600000000000002e-05,
      "loss": 0.2188,
      "step": 33000
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.2800000000000001e-05,
      "loss": 0.2257,
      "step": 34000
    },
    {
      "epoch": 17.5,
      "learning_rate": 1.2e-05,
      "loss": 0.2191,
      "step": 35000
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.1200000000000001e-05,
      "loss": 0.2166,
      "step": 36000
    },
    {
      "epoch": 18.5,
      "learning_rate": 1.04e-05,
      "loss": 0.2177,
      "step": 37000
    },
    {
      "epoch": 19.0,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.2136,
      "step": 38000
    },
    {
      "epoch": 19.5,
      "learning_rate": 8.8e-06,
      "loss": 0.2133,
      "step": 39000
    },
    {
      "epoch": 20.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.2127,
      "step": 40000
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.19654138386249542,
      "eval_p-value": 0.0,
      "eval_runtime": 44.5401,
      "eval_samples_per_second": 89.807,
      "eval_spearman": 0.5903544136329805,
      "eval_steps_per_second": 11.226,
      "step": 40000
    },
    {
      "epoch": 20.5,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 0.2143,
      "step": 41000
    },
    {
      "epoch": 21.0,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.2104,
      "step": 42000
    },
    {
      "epoch": 21.5,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.2067,
      "step": 43000
    },
    {
      "epoch": 22.0,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.213,
      "step": 44000
    },
    {
      "epoch": 22.5,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.2098,
      "step": 45000
    },
    {
      "epoch": 23.0,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.2106,
      "step": 46000
    },
    {
      "epoch": 23.5,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.2071,
      "step": 47000
    },
    {
      "epoch": 24.0,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.2095,
      "step": 48000
    },
    {
      "epoch": 24.5,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.2053,
      "step": 49000
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.0,
      "loss": 0.2119,
      "step": 50000
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.19228921830654144,
      "eval_p-value": 0.0,
      "eval_runtime": 44.4561,
      "eval_samples_per_second": 89.976,
      "eval_spearman": 0.5989616200729698,
      "eval_steps_per_second": 11.247,
      "step": 50000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 50000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 25,
  "save_steps": 10000,
  "total_flos": 3.71702366208e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}