{ "best_metric": 0.6711095571517944, "best_model_checkpoint": "distil-bert-fintuned-issues-cfpb-complaints/checkpoint-23206", "epoch": 2.0, "eval_steps": 500, "global_step": 23206, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.04, "learning_rate": 1.9569076962854436e-05, "loss": 1.4292, "step": 500 }, { "epoch": 0.09, "learning_rate": 1.9138153925708868e-05, "loss": 1.1124, "step": 1000 }, { "epoch": 0.13, "learning_rate": 1.8707230888563303e-05, "loss": 1.0067, "step": 1500 }, { "epoch": 0.17, "learning_rate": 1.8276307851417738e-05, "loss": 0.9522, "step": 2000 }, { "epoch": 0.22, "learning_rate": 1.7845384814272172e-05, "loss": 0.8931, "step": 2500 }, { "epoch": 0.26, "learning_rate": 1.7414461777126604e-05, "loss": 0.8832, "step": 3000 }, { "epoch": 0.3, "learning_rate": 1.698353873998104e-05, "loss": 0.8627, "step": 3500 }, { "epoch": 0.34, "learning_rate": 1.6552615702835474e-05, "loss": 0.8511, "step": 4000 }, { "epoch": 0.39, "learning_rate": 1.612169266568991e-05, "loss": 0.835, "step": 4500 }, { "epoch": 0.43, "learning_rate": 1.5690769628544343e-05, "loss": 0.8137, "step": 5000 }, { "epoch": 0.47, "learning_rate": 1.5259846591398778e-05, "loss": 0.8188, "step": 5500 }, { "epoch": 0.52, "learning_rate": 1.4828923554253211e-05, "loss": 0.8164, "step": 6000 }, { "epoch": 0.56, "learning_rate": 1.4398000517107646e-05, "loss": 0.7985, "step": 6500 }, { "epoch": 0.6, "learning_rate": 1.396707747996208e-05, "loss": 0.7913, "step": 7000 }, { "epoch": 0.65, "learning_rate": 1.3536154442816514e-05, "loss": 0.7857, "step": 7500 }, { "epoch": 0.69, "learning_rate": 1.3105231405670949e-05, "loss": 0.7922, "step": 8000 }, { "epoch": 0.73, "learning_rate": 1.2674308368525384e-05, "loss": 0.7649, "step": 8500 }, { "epoch": 0.78, "learning_rate": 1.2243385331379815e-05, "loss": 0.7679, "step": 9000 }, { "epoch": 0.82, "learning_rate": 1.181246229423425e-05, "loss": 0.7558, "step": 9500 }, { "epoch": 0.86, "learning_rate": 1.1381539257088685e-05, "loss": 0.7499, "step": 10000 }, { "epoch": 0.9, "learning_rate": 1.095061621994312e-05, "loss": 0.7278, "step": 10500 }, { "epoch": 0.95, "learning_rate": 1.0519693182797553e-05, "loss": 0.7288, "step": 11000 }, { "epoch": 0.99, "learning_rate": 1.0088770145651988e-05, "loss": 0.7269, "step": 11500 }, { "epoch": 1.0, "eval_accuracy": 0.7437621202327085, "eval_f1": 0.5490839527402266, "eval_loss": 0.7186761498451233, "eval_precision": 0.5852853204976994, "eval_recall": 0.532749286993408, "eval_runtime": 244.2988, "eval_samples_per_second": 253.296, "eval_steps_per_second": 15.833, "step": 11603 }, { "epoch": 1.03, "learning_rate": 9.657847108506421e-06, "loss": 0.7075, "step": 12000 }, { "epoch": 1.08, "learning_rate": 9.226924071360856e-06, "loss": 0.6624, "step": 12500 }, { "epoch": 1.12, "learning_rate": 8.79600103421529e-06, "loss": 0.6742, "step": 13000 }, { "epoch": 1.16, "learning_rate": 8.365077997069724e-06, "loss": 0.6966, "step": 13500 }, { "epoch": 1.21, "learning_rate": 7.934154959924159e-06, "loss": 0.6689, "step": 14000 }, { "epoch": 1.25, "learning_rate": 7.503231922778592e-06, "loss": 0.6536, "step": 14500 }, { "epoch": 1.29, "learning_rate": 7.072308885633027e-06, "loss": 0.6711, "step": 15000 }, { "epoch": 1.34, "learning_rate": 6.641385848487461e-06, "loss": 0.653, "step": 15500 }, { "epoch": 1.38, "learning_rate": 6.2104628113418955e-06, "loss": 0.6358, "step": 16000 }, { "epoch": 1.42, "learning_rate": 5.779539774196329e-06, "loss": 0.6607, "step": 16500 
}, { "epoch": 1.47, "learning_rate": 5.3486167370507635e-06, "loss": 0.6536, "step": 17000 }, { "epoch": 1.51, "learning_rate": 4.9176936999051975e-06, "loss": 0.6799, "step": 17500 }, { "epoch": 1.55, "learning_rate": 4.4867706627596315e-06, "loss": 0.6422, "step": 18000 }, { "epoch": 1.59, "learning_rate": 4.0558476256140655e-06, "loss": 0.6323, "step": 18500 }, { "epoch": 1.64, "learning_rate": 3.6249245884685e-06, "loss": 0.6499, "step": 19000 }, { "epoch": 1.68, "learning_rate": 3.194001551322934e-06, "loss": 0.6522, "step": 19500 }, { "epoch": 1.72, "learning_rate": 2.763078514177368e-06, "loss": 0.6451, "step": 20000 }, { "epoch": 1.77, "learning_rate": 2.3321554770318023e-06, "loss": 0.6495, "step": 20500 }, { "epoch": 1.81, "learning_rate": 1.9012324398862363e-06, "loss": 0.635, "step": 21000 }, { "epoch": 1.85, "learning_rate": 1.4703094027406705e-06, "loss": 0.6311, "step": 21500 }, { "epoch": 1.9, "learning_rate": 1.0393863655951047e-06, "loss": 0.6392, "step": 22000 }, { "epoch": 1.94, "learning_rate": 6.08463328449539e-07, "loss": 0.6471, "step": 22500 }, { "epoch": 1.98, "learning_rate": 1.7754029130397314e-07, "loss": 0.612, "step": 23000 }, { "epoch": 2.0, "eval_accuracy": 0.7651745313510019, "eval_f1": 0.5702070410433322, "eval_loss": 0.6711095571517944, "eval_precision": 0.5934826294010517, "eval_recall": 0.5577165037333897, "eval_runtime": 245.1951, "eval_samples_per_second": 252.37, "eval_steps_per_second": 15.775, "step": 23206 } ], "logging_steps": 500, "max_steps": 23206, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "total_flos": 4.919485725586022e+16, "train_batch_size": 16, "trial_name": null, "trial_params": null }