{ "best_metric": 0.44441091037773406, "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-1/checkpoint-8552", "epoch": 4.0, "eval_steps": 500, "global_step": 8552, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.23386342376052385, "grad_norm": 3.146369218826294, "learning_rate": 1.653715321124928e-06, "loss": 0.6079, "step": 500 }, { "epoch": 0.4677268475210477, "grad_norm": 5.078900337219238, "learning_rate": 1.5725712916291023e-06, "loss": 0.6126, "step": 1000 }, { "epoch": 0.7015902712815716, "grad_norm": 7.2771897315979, "learning_rate": 1.4914272621332766e-06, "loss": 0.5551, "step": 1500 }, { "epoch": 0.9354536950420954, "grad_norm": 22.494115829467773, "learning_rate": 1.4102832326374508e-06, "loss": 0.523, "step": 2000 }, { "epoch": 1.0, "eval_loss": 0.5956234335899353, "eval_matthews_correlation": 0.3084289905217366, "eval_runtime": 31.6888, "eval_samples_per_second": 32.914, "eval_steps_per_second": 2.083, "step": 2138 }, { "epoch": 1.1693171188026192, "grad_norm": 14.600356101989746, "learning_rate": 1.329139203141625e-06, "loss": 0.501, "step": 2500 }, { "epoch": 1.4031805425631432, "grad_norm": 20.79261589050293, "learning_rate": 1.2479951736457994e-06, "loss": 0.5081, "step": 3000 }, { "epoch": 1.637043966323667, "grad_norm": 26.76811981201172, "learning_rate": 1.1668511441499738e-06, "loss": 0.4969, "step": 3500 }, { "epoch": 1.8709073900841908, "grad_norm": 24.809595108032227, "learning_rate": 1.0857071146541479e-06, "loss": 0.5316, "step": 4000 }, { "epoch": 2.0, "eval_loss": 0.5633124113082886, "eval_matthews_correlation": 0.4209858842371305, "eval_runtime": 30.8324, "eval_samples_per_second": 33.828, "eval_steps_per_second": 2.141, "step": 4276 }, { "epoch": 2.1047708138447145, "grad_norm": 35.22346878051758, "learning_rate": 1.0045630851583222e-06, "loss": 0.4832, "step": 4500 }, { "epoch": 2.3386342376052385, "grad_norm": 57.915016174316406, "learning_rate": 9.234190556624964e-07, "loss": 0.5068, "step": 5000 }, { "epoch": 2.5724976613657624, "grad_norm": 72.93724822998047, "learning_rate": 8.422750261666709e-07, "loss": 0.4365, "step": 5500 }, { "epoch": 2.8063610851262863, "grad_norm": 41.979488372802734, "learning_rate": 7.611309966708451e-07, "loss": 0.4986, "step": 6000 }, { "epoch": 3.0, "eval_loss": 0.6376824378967285, "eval_matthews_correlation": 0.42676403064167506, "eval_runtime": 29.8373, "eval_samples_per_second": 34.956, "eval_steps_per_second": 2.212, "step": 6414 }, { "epoch": 3.0402245088868103, "grad_norm": 1.3610893487930298, "learning_rate": 6.799869671750195e-07, "loss": 0.5347, "step": 6500 }, { "epoch": 3.2740879326473338, "grad_norm": 57.98234558105469, "learning_rate": 5.988429376791937e-07, "loss": 0.4583, "step": 7000 }, { "epoch": 3.5079513564078577, "grad_norm": 68.71504974365234, "learning_rate": 5.17698908183368e-07, "loss": 0.4624, "step": 7500 }, { "epoch": 3.7418147801683816, "grad_norm": 8.295002937316895, "learning_rate": 4.3655487868754235e-07, "loss": 0.4819, "step": 8000 }, { "epoch": 3.9756782039289056, "grad_norm": 54.670257568359375, "learning_rate": 3.554108491917166e-07, "loss": 0.5088, "step": 8500 }, { "epoch": 4.0, "eval_loss": 0.6887398958206177, "eval_matthews_correlation": 0.44441091037773406, "eval_runtime": 32.0864, "eval_samples_per_second": 32.506, "eval_steps_per_second": 2.057, "step": 8552 } ], "logging_steps": 500, "max_steps": 10690, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 
138804100858896.0, "train_batch_size": 4, "trial_name": null, "trial_params": { "learning_rate": 1.7348593506207536e-06, "num_train_epochs": 5, "per_device_train_batch_size": 4, "seed": 22 } }