{
  "best_metric": 0.7850029291154071,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-26/checkpoint-1498",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 1498,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 7.895910739898682,
      "learning_rate": 0.00029747597659594095,
      "loss": 0.5937,
      "step": 214
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7152899824253075,
      "eval_f1": 0.425531914893617,
      "eval_loss": 0.5441433787345886,
      "eval_mcc": 0.2954832365575353,
      "eval_precision": 0.6498194945848376,
      "eval_recall": 0.3163444639718805,
      "eval_runtime": 3.1629,
      "eval_samples_per_second": 539.696,
      "eval_steps_per_second": 17.073,
      "step": 214
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.099026918411255,
      "learning_rate": 0.00028094953345172204,
      "loss": 0.5307,
      "step": 428
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7451669595782073,
      "eval_f1": 0.5435466946484785,
      "eval_loss": 0.5096856951713562,
      "eval_mcc": 0.3898809523809524,
      "eval_precision": 0.6744791666666666,
      "eval_recall": 0.45518453427065025,
      "eval_runtime": 3.1738,
      "eval_samples_per_second": 537.84,
      "eval_steps_per_second": 17.014,
      "step": 428
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.113579750061035,
      "learning_rate": 0.0002644230903075031,
      "loss": 0.505,
      "step": 642
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7393087287639133,
      "eval_f1": 0.4331210191082803,
      "eval_loss": 0.5280328392982483,
      "eval_mcc": 0.36632506494660955,
      "eval_precision": 0.7870370370370371,
      "eval_recall": 0.29876977152899825,
      "eval_runtime": 3.1389,
      "eval_samples_per_second": 543.826,
      "eval_steps_per_second": 17.204,
      "step": 642
    },
    {
      "epoch": 4.0,
      "grad_norm": 2.820150852203369,
      "learning_rate": 0.00024789664716328417,
      "loss": 0.4888,
      "step": 856
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7510251903925015,
      "eval_f1": 0.4798041615667075,
      "eval_loss": 0.49989721179008484,
      "eval_mcc": 0.3996781742784298,
      "eval_precision": 0.7903225806451613,
      "eval_recall": 0.3444639718804921,
      "eval_runtime": 3.3284,
      "eval_samples_per_second": 512.86,
      "eval_steps_per_second": 16.224,
      "step": 856
    },
    {
      "epoch": 5.0,
      "grad_norm": 3.9773123264312744,
      "learning_rate": 0.00023137020401906518,
      "loss": 0.475,
      "step": 1070
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7656707674282367,
      "eval_f1": 0.6190476190476191,
      "eval_loss": 0.48136404156684875,
      "eval_mcc": 0.4548772091771283,
      "eval_precision": 0.6756756756756757,
      "eval_recall": 0.5711775043936731,
      "eval_runtime": 3.1446,
      "eval_samples_per_second": 542.835,
      "eval_steps_per_second": 17.172,
      "step": 1070
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.4520063400268555,
      "learning_rate": 0.00021484376087484627,
      "loss": 0.4656,
      "step": 1284
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7650849443468073,
      "eval_f1": 0.6337899543378995,
      "eval_loss": 0.48336538672447205,
      "eval_mcc": 0.46203450521175543,
      "eval_precision": 0.6596958174904943,
      "eval_recall": 0.6098418277680141,
      "eval_runtime": 3.3889,
      "eval_samples_per_second": 503.697,
      "eval_steps_per_second": 15.934,
      "step": 1284
    },
    {
      "epoch": 7.0,
      "grad_norm": 7.588683128356934,
      "learning_rate": 0.0001983173177306273,
      "loss": 0.4555,
      "step": 1498
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7850029291154071,
      "eval_f1": 0.608324439701174,
      "eval_loss": 0.48064079880714417,
      "eval_mcc": 0.4905686212612055,
      "eval_precision": 0.7744565217391305,
      "eval_recall": 0.5008787346221442,
      "eval_runtime": 3.1528,
      "eval_samples_per_second": 541.429,
      "eval_steps_per_second": 17.128,
      "step": 1498
    }
  ],
  "logging_steps": 500,
  "max_steps": 4066,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 19,
  "save_steps": 500,
  "total_flos": 3673429651080.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.9884393343643966,
    "learning_rate": 0.0003140024197401599,
    "num_train_epochs": 19,
    "temperature": 18
  }
}