{ "best_metric": 1.6118786334991455, "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/FacebookAI/roberta_base_scotus/checkpoint-200", "epoch": 3.0, "eval_steps": 50, "global_step": 237, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.13, "grad_norm": 4.399165153503418, "learning_rate": 1.9156118143459917e-05, "loss": 2.4218, "step": 10 }, { "epoch": 0.25, "grad_norm": 3.196627378463745, "learning_rate": 1.8312236286919833e-05, "loss": 2.1889, "step": 20 }, { "epoch": 0.38, "grad_norm": 2.2070045471191406, "learning_rate": 1.746835443037975e-05, "loss": 2.1449, "step": 30 }, { "epoch": 0.51, "grad_norm": 3.898432493209839, "learning_rate": 1.662447257383966e-05, "loss": 2.0514, "step": 40 }, { "epoch": 0.63, "grad_norm": 5.449430465698242, "learning_rate": 1.578059071729958e-05, "loss": 1.9275, "step": 50 }, { "epoch": 0.63, "eval_accuracy": 0.41, "eval_f1_macro": 0.11253090508032011, "eval_f1_micro": 0.41, "eval_loss": 1.915439486503601, "eval_runtime": 1.2656, "eval_samples_per_second": 1106.191, "eval_steps_per_second": 17.383, "step": 50 }, { "epoch": 0.76, "grad_norm": 4.739038467407227, "learning_rate": 1.4936708860759495e-05, "loss": 1.8774, "step": 60 }, { "epoch": 0.89, "grad_norm": 5.2211127281188965, "learning_rate": 1.4092827004219412e-05, "loss": 1.7642, "step": 70 }, { "epoch": 1.01, "grad_norm": 5.152080535888672, "learning_rate": 1.3248945147679326e-05, "loss": 1.8278, "step": 80 }, { "epoch": 1.14, "grad_norm": 6.538599967956543, "learning_rate": 1.240506329113924e-05, "loss": 1.6386, "step": 90 }, { "epoch": 1.27, "grad_norm": 8.518353462219238, "learning_rate": 1.1561181434599158e-05, "loss": 1.5742, "step": 100 }, { "epoch": 1.27, "eval_accuracy": 0.4707142857142857, "eval_f1_macro": 0.17818280427876487, "eval_f1_micro": 0.4707142857142857, "eval_loss": 1.7446955442428589, "eval_runtime": 1.3224, "eval_samples_per_second": 1058.65, "eval_steps_per_second": 16.636, "step": 100 }, { "epoch": 1.39, "grad_norm": 6.387670516967773, "learning_rate": 1.0717299578059072e-05, "loss": 1.6975, "step": 110 }, { "epoch": 1.52, "grad_norm": 7.155932903289795, "learning_rate": 9.87341772151899e-06, "loss": 1.6592, "step": 120 }, { "epoch": 1.65, "grad_norm": 8.153914451599121, "learning_rate": 9.029535864978903e-06, "loss": 1.6166, "step": 130 }, { "epoch": 1.77, "grad_norm": 7.831178188323975, "learning_rate": 8.18565400843882e-06, "loss": 1.5245, "step": 140 }, { "epoch": 1.9, "grad_norm": 7.43508768081665, "learning_rate": 7.341772151898735e-06, "loss": 1.6563, "step": 150 }, { "epoch": 1.9, "eval_accuracy": 0.4785714285714286, "eval_f1_macro": 0.20760404191283174, "eval_f1_micro": 0.4785714285714286, "eval_loss": 1.6859331130981445, "eval_runtime": 1.2827, "eval_samples_per_second": 1091.43, "eval_steps_per_second": 17.151, "step": 150 }, { "epoch": 2.03, "grad_norm": 8.414676666259766, "learning_rate": 6.49789029535865e-06, "loss": 1.5582, "step": 160 }, { "epoch": 2.15, "grad_norm": 8.495677947998047, "learning_rate": 5.654008438818566e-06, "loss": 1.4579, "step": 170 }, { "epoch": 2.28, "grad_norm": 7.206579685211182, "learning_rate": 4.8101265822784815e-06, "loss": 1.5167, "step": 180 }, { "epoch": 2.41, "grad_norm": 6.6391167640686035, "learning_rate": 3.9662447257383965e-06, "loss": 1.4687, "step": 190 }, { "epoch": 2.53, "grad_norm": 5.48373556137085, "learning_rate": 3.1223628691983127e-06, "loss": 1.4743, "step": 200 }, { "epoch": 2.53, "eval_accuracy": 0.5135714285714286, 
"eval_f1_macro": 0.24550969494780733, "eval_f1_micro": 0.5135714285714286, "eval_loss": 1.6118786334991455, "eval_runtime": 1.2892, "eval_samples_per_second": 1085.913, "eval_steps_per_second": 17.064, "step": 200 }, { "epoch": 2.66, "grad_norm": 7.982447147369385, "learning_rate": 2.278481012658228e-06, "loss": 1.481, "step": 210 }, { "epoch": 2.78, "grad_norm": 7.101485729217529, "learning_rate": 1.4345991561181436e-06, "loss": 1.4405, "step": 220 }, { "epoch": 2.91, "grad_norm": 9.05599308013916, "learning_rate": 5.907172995780591e-07, "loss": 1.4664, "step": 230 }, { "epoch": 3.0, "step": 237, "total_flos": 997815706714112.0, "train_loss": 1.706272036717411, "train_runtime": 56.9364, "train_samples_per_second": 263.452, "train_steps_per_second": 4.163 } ], "logging_steps": 10, "max_steps": 237, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 50, "total_flos": 997815706714112.0, "train_batch_size": 32, "trial_name": null, "trial_params": null }