{
"best_metric": 1.5313832759857178,
"best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_bert/bert_base_uncased_scotus/checkpoint-200",
"epoch": 2.5316455696202533,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13,
"grad_norm": 7.085313320159912,
"learning_rate": 1.9156118143459917e-05,
"loss": 2.429,
"step": 10
},
{
"epoch": 0.25,
"grad_norm": 4.804141521453857,
"learning_rate": 1.8312236286919833e-05,
"loss": 2.2691,
"step": 20
},
{
"epoch": 0.38,
"grad_norm": 4.930667877197266,
"learning_rate": 1.746835443037975e-05,
"loss": 2.1744,
"step": 30
},
{
"epoch": 0.51,
"grad_norm": 2.996751308441162,
"learning_rate": 1.662447257383966e-05,
"loss": 2.0977,
"step": 40
},
{
"epoch": 0.63,
"grad_norm": 3.7142813205718994,
"learning_rate": 1.578059071729958e-05,
"loss": 2.0086,
"step": 50
},
{
"epoch": 0.63,
"eval_accuracy": 0.38571428571428573,
"eval_f1_macro": 0.09223731954881484,
"eval_f1_micro": 0.38571428571428573,
"eval_loss": 2.006516218185425,
"eval_runtime": 1.2668,
"eval_samples_per_second": 1105.113,
"eval_steps_per_second": 17.366,
"step": 50
},
{
"epoch": 0.76,
"grad_norm": 2.9603490829467773,
"learning_rate": 1.4936708860759495e-05,
"loss": 1.9617,
"step": 60
},
{
"epoch": 0.89,
"grad_norm": 2.7058732509613037,
"learning_rate": 1.4092827004219412e-05,
"loss": 1.8188,
"step": 70
},
{
"epoch": 1.01,
"grad_norm": 3.41971755027771,
"learning_rate": 1.3248945147679326e-05,
"loss": 1.8824,
"step": 80
},
{
"epoch": 1.14,
"grad_norm": 4.253640651702881,
"learning_rate": 1.240506329113924e-05,
"loss": 1.6726,
"step": 90
},
{
"epoch": 1.27,
"grad_norm": 4.203283786773682,
"learning_rate": 1.1561181434599158e-05,
"loss": 1.5947,
"step": 100
},
{
"epoch": 1.27,
"eval_accuracy": 0.475,
"eval_f1_macro": 0.18707990555027815,
"eval_f1_micro": 0.475,
"eval_loss": 1.6981335878372192,
"eval_runtime": 1.3258,
"eval_samples_per_second": 1055.997,
"eval_steps_per_second": 16.594,
"step": 100
},
{
"epoch": 1.39,
"grad_norm": 5.603295803070068,
"learning_rate": 1.0717299578059072e-05,
"loss": 1.6728,
"step": 110
},
{
"epoch": 1.52,
"grad_norm": 5.53965425491333,
"learning_rate": 9.87341772151899e-06,
"loss": 1.6157,
"step": 120
},
{
"epoch": 1.65,
"grad_norm": 3.6127514839172363,
"learning_rate": 9.029535864978903e-06,
"loss": 1.5524,
"step": 130
},
{
"epoch": 1.77,
"grad_norm": 4.825035572052002,
"learning_rate": 8.18565400843882e-06,
"loss": 1.4613,
"step": 140
},
{
"epoch": 1.9,
"grad_norm": 4.025143146514893,
"learning_rate": 7.341772151898735e-06,
"loss": 1.5835,
"step": 150
},
{
"epoch": 1.9,
"eval_accuracy": 0.5378571428571428,
"eval_f1_macro": 0.2780332747870082,
"eval_f1_micro": 0.5378571428571428,
"eval_loss": 1.5822380781173706,
"eval_runtime": 1.3354,
"eval_samples_per_second": 1048.397,
"eval_steps_per_second": 16.475,
"step": 150
},
{
"epoch": 2.03,
"grad_norm": 4.761880397796631,
"learning_rate": 6.49789029535865e-06,
"loss": 1.4868,
"step": 160
},
{
"epoch": 2.15,
"grad_norm": 3.8378217220306396,
"learning_rate": 5.654008438818566e-06,
"loss": 1.4197,
"step": 170
},
{
"epoch": 2.28,
"grad_norm": 3.8176848888397217,
"learning_rate": 4.8101265822784815e-06,
"loss": 1.4556,
"step": 180
},
{
"epoch": 2.41,
"grad_norm": 4.766996383666992,
"learning_rate": 3.9662447257383965e-06,
"loss": 1.3993,
"step": 190
},
{
"epoch": 2.53,
"grad_norm": 3.517928123474121,
"learning_rate": 3.1223628691983127e-06,
"loss": 1.3943,
"step": 200
},
{
"epoch": 2.53,
"eval_accuracy": 0.5514285714285714,
"eval_f1_macro": 0.2849023186790848,
"eval_f1_micro": 0.5514285714285714,
"eval_loss": 1.5313832759857178,
"eval_runtime": 1.3455,
"eval_samples_per_second": 1040.508,
"eval_steps_per_second": 16.351,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 237,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"total_flos": 842038572482560.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}