{
  "best_metric": 0.8841743119266054,
  "best_model_checkpoint": "Tech-oriented/best_model_bert_uncasedbert-base-uncased-finetuned-sst2/run-0/checkpoint-50514",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 50514,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 19.703554153442383,
      "learning_rate": 6.973163454116781e-07,
      "loss": 0.7045,
      "step": 4209
    },
    {
      "epoch": 0.5,
      "grad_norm": 33.55769729614258,
      "learning_rate": 6.508344788890587e-07,
      "loss": 0.6701,
      "step": 8418
    },
    {
      "epoch": 0.75,
      "grad_norm": 121.4453353881836,
      "learning_rate": 6.043526123664394e-07,
      "loss": 0.566,
      "step": 12627
    },
    {
      "epoch": 1.0,
      "grad_norm": 72.47432708740234,
      "learning_rate": 5.578707458438201e-07,
      "loss": 0.5309,
      "step": 16836
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8715596330275229,
      "eval_loss": 0.3419412672519684,
      "eval_runtime": 2.4269,
      "eval_samples_per_second": 359.309,
      "eval_steps_per_second": 22.663,
      "step": 16838
    },
    {
      "epoch": 1.25,
      "grad_norm": 169.0806884765625,
      "learning_rate": 5.113888793212007e-07,
      "loss": 0.5455,
      "step": 21045
    },
    {
      "epoch": 1.5,
      "grad_norm": 26.04638671875,
      "learning_rate": 4.649070127985814e-07,
      "loss": 0.5548,
      "step": 25254
    },
    {
      "epoch": 1.75,
      "grad_norm": 17.553491592407227,
      "learning_rate": 4.18425146275962e-07,
      "loss": 0.5681,
      "step": 29463
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.3310186862945557,
      "learning_rate": 3.7194327975334265e-07,
      "loss": 0.6066,
      "step": 33672
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8784403669724771,
      "eval_loss": 0.3679516315460205,
      "eval_runtime": 2.4422,
      "eval_samples_per_second": 357.051,
      "eval_steps_per_second": 22.52,
      "step": 33676
    },
    {
      "epoch": 2.25,
      "grad_norm": 60.65003967285156,
      "learning_rate": 3.2546141323072335e-07,
      "loss": 0.5873,
      "step": 37881
    },
    {
      "epoch": 2.5,
      "grad_norm": 179.75804138183594,
      "learning_rate": 2.78979546708104e-07,
      "loss": 0.6103,
      "step": 42090
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.3105064332485199,
      "learning_rate": 2.3249768018548465e-07,
      "loss": 0.6097,
      "step": 46299
    },
    {
      "epoch": 3.0,
      "grad_norm": 22.6956844329834,
      "learning_rate": 1.8601581366286533e-07,
      "loss": 0.5877,
      "step": 50508
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8841743119266054,
      "eval_loss": 0.40006375312805176,
      "eval_runtime": 2.4426,
      "eval_samples_per_second": 357.004,
      "eval_steps_per_second": 22.517,
      "step": 50514
    }
  ],
  "logging_steps": 4209,
  "max_steps": 67352,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 2484012973657680.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 7.437982119342974e-07,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 4,
    "seed": 7,
    "weight_decay": 0.46186751115854463
  }
}
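
The file above is a trainer_state.json written by the Hugging Face transformers Trainer during a hyperparameter-search trial. A minimal sketch of how one might read it and summarize the logged metrics with the standard library, assuming the file is saved locally as trainer_state.json (the filename and script are illustrative, not part of the original repository):

import json

# Load the trainer state written by transformers.Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best eval accuracy: {state['best_metric']:.4f}")
print(f"best checkpoint:    {state['best_model_checkpoint']}")

# log_history mixes training-loss records and evaluation records;
# evaluation records are the ones carrying an "eval_accuracy" key.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.1f}: "
              f"eval_accuracy={entry['eval_accuracy']:.4f}, "
              f"eval_loss={entry['eval_loss']:.4f}")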