{ "best_metric": 0.7803163444639719, "best_model_checkpoint": "tiny-bert-sst2-distilled/run-21/checkpoint-1712", "epoch": 13.0, "eval_steps": 500, "global_step": 2782, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 6.750798225402832, "learning_rate": 0.00029949232941444245, "loss": 0.5927, "step": 214 }, { "epoch": 1.0, "eval_accuracy": 0.7147041593438781, "eval_f1": 0.4236686390532544, "eval_loss": 0.5422016978263855, "eval_mcc": 0.293664298583451, "eval_precision": 0.6485507246376812, "eval_recall": 0.3145869947275923, "eval_runtime": 3.1455, "eval_samples_per_second": 542.682, "eval_steps_per_second": 17.167, "step": 214 }, { "epoch": 2.0, "grad_norm": 2.565903902053833, "learning_rate": 0.00027453463529657224, "loss": 0.5298, "step": 428 }, { "epoch": 2.0, "eval_accuracy": 0.7357937902753369, "eval_f1": 0.5124324324324324, "eval_loss": 0.5132952928543091, "eval_mcc": 0.36196008516504746, "eval_precision": 0.6657303370786517, "eval_recall": 0.4165202108963093, "eval_runtime": 3.2065, "eval_samples_per_second": 532.363, "eval_steps_per_second": 16.841, "step": 428 }, { "epoch": 3.0, "grad_norm": 2.1464009284973145, "learning_rate": 0.00024957694117870203, "loss": 0.5051, "step": 642 }, { "epoch": 3.0, "eval_accuracy": 0.7340363210310487, "eval_f1": 0.41191709844559593, "eval_loss": 0.5232194066047668, "eval_mcc": 0.3506417522239517, "eval_precision": 0.7832512315270936, "eval_recall": 0.27943760984182775, "eval_runtime": 3.1624, "eval_samples_per_second": 539.782, "eval_steps_per_second": 17.076, "step": 642 }, { "epoch": 4.0, "grad_norm": 3.173802375793457, "learning_rate": 0.00022461924706083182, "loss": 0.4904, "step": 856 }, { "epoch": 4.0, "eval_accuracy": 0.7434094903339191, "eval_f1": 0.46715328467153294, "eval_loss": 0.49749329686164856, "eval_mcc": 0.37656942971572427, "eval_precision": 0.758893280632411, "eval_recall": 0.3374340949033392, "eval_runtime": 3.4589, "eval_samples_per_second": 493.508, "eval_steps_per_second": 15.612, "step": 856 }, { "epoch": 5.0, "grad_norm": 5.341954708099365, "learning_rate": 0.00019966155294296164, "loss": 0.4762, "step": 1070 }, { "epoch": 5.0, "eval_accuracy": 0.7381370826010545, "eval_f1": 0.6499608457321848, "eval_loss": 0.5032187104225159, "eval_mcc": 0.4515022458491435, "eval_precision": 0.5861581920903954, "eval_recall": 0.7293497363796133, "eval_runtime": 3.1483, "eval_samples_per_second": 542.198, "eval_steps_per_second": 17.152, "step": 1070 }, { "epoch": 6.0, "grad_norm": 4.139582633972168, "learning_rate": 0.00017470385882509143, "loss": 0.4708, "step": 1284 }, { "epoch": 6.0, "eval_accuracy": 0.7662565905096661, "eval_f1": 0.6288372093023256, "eval_loss": 0.48133140802383423, "eval_mcc": 0.46078884011912263, "eval_precision": 0.6679841897233202, "eval_recall": 0.5940246045694201, "eval_runtime": 3.3146, "eval_samples_per_second": 514.988, "eval_steps_per_second": 16.291, "step": 1284 }, { "epoch": 7.0, "grad_norm": 4.1577534675598145, "learning_rate": 0.00014974616470722122, "loss": 0.4562, "step": 1498 }, { "epoch": 7.0, "eval_accuracy": 0.7738722905682484, "eval_f1": 0.5936842105263158, "eval_loss": 0.48448288440704346, "eval_mcc": 0.4625979514063081, "eval_precision": 0.7401574803149606, "eval_recall": 0.4956063268892794, "eval_runtime": 3.1532, "eval_samples_per_second": 541.359, "eval_steps_per_second": 17.126, "step": 1498 }, { "epoch": 8.0, "grad_norm": 4.001567840576172, "learning_rate": 0.00012478847058935101, "loss": 0.4448, 
"step": 1712 }, { "epoch": 8.0, "eval_accuracy": 0.7803163444639719, "eval_f1": 0.6397694524495678, "eval_loss": 0.46665722131729126, "eval_mcc": 0.48808014304461034, "eval_precision": 0.7055084745762712, "eval_recall": 0.5852372583479789, "eval_runtime": 3.3884, "eval_samples_per_second": 503.777, "eval_steps_per_second": 15.937, "step": 1712 }, { "epoch": 9.0, "grad_norm": 9.252790451049805, "learning_rate": 9.983077647148082e-05, "loss": 0.4375, "step": 1926 }, { "epoch": 9.0, "eval_accuracy": 0.7785588752196837, "eval_f1": 0.6204819277108434, "eval_loss": 0.46744513511657715, "eval_mcc": 0.4782291935803437, "eval_precision": 0.7236533957845434, "eval_recall": 0.5430579964850615, "eval_runtime": 3.1528, "eval_samples_per_second": 541.426, "eval_steps_per_second": 17.128, "step": 1926 }, { "epoch": 10.0, "grad_norm": 6.555416107177734, "learning_rate": 7.487308235361061e-05, "loss": 0.4281, "step": 2140 }, { "epoch": 10.0, "eval_accuracy": 0.7709431751611013, "eval_f1": 0.6530612244897959, "eval_loss": 0.4783839285373688, "eval_mcc": 0.4821703835032232, "eval_precision": 0.6594982078853047, "eval_recall": 0.6467486818980668, "eval_runtime": 3.3012, "eval_samples_per_second": 517.082, "eval_steps_per_second": 16.358, "step": 2140 }, { "epoch": 11.0, "grad_norm": 3.022094488143921, "learning_rate": 4.991538823574041e-05, "loss": 0.4205, "step": 2354 }, { "epoch": 11.0, "eval_accuracy": 0.7803163444639719, "eval_f1": 0.6362754607177498, "eval_loss": 0.4678700566291809, "eval_mcc": 0.48668710736858267, "eval_precision": 0.70995670995671, "eval_recall": 0.5764499121265377, "eval_runtime": 3.1644, "eval_samples_per_second": 539.433, "eval_steps_per_second": 17.065, "step": 2354 }, { "epoch": 12.0, "grad_norm": 3.7709765434265137, "learning_rate": 2.4957694117870205e-05, "loss": 0.4138, "step": 2568 }, { "epoch": 12.0, "eval_accuracy": 0.7797305213825425, "eval_f1": 0.6581818181818181, "eval_loss": 0.4624987542629242, "eval_mcc": 0.4966232685018691, "eval_precision": 0.6817325800376648, "eval_recall": 0.6362038664323374, "eval_runtime": 3.2014, "eval_samples_per_second": 533.205, "eval_steps_per_second": 16.868, "step": 2568 }, { "epoch": 13.0, "grad_norm": 9.869558334350586, "learning_rate": 0.0, "loss": 0.4067, "step": 2782 }, { "epoch": 13.0, "eval_accuracy": 0.7768014059753954, "eval_f1": 0.6613333333333332, "eval_loss": 0.470124214887619, "eval_mcc": 0.4949917624648792, "eval_precision": 0.6690647482014388, "eval_recall": 0.6537785588752196, "eval_runtime": 3.1694, "eval_samples_per_second": 538.585, "eval_steps_per_second": 17.038, "step": 2782 } ], "logging_steps": 500, "max_steps": 2782, "num_input_tokens_seen": 0, "num_train_epochs": 13, "save_steps": 500, "total_flos": 6822083637720.0, "train_batch_size": 32, "trial_name": null, "trial_params": { "alpha": 0.9862491765682508, "learning_rate": 0.00032445002353231265, "num_train_epochs": 13, "temperature": 19 } }