{
  "best_metric": 0.05951835587620735,
  "best_model_checkpoint": "finetuned-Leukemia-cell/checkpoint-600",
  "epoch": 20.0,
  "eval_steps": 100,
  "global_step": 680,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.29,
      "learning_rate": 0.00019705882352941177,
      "loss": 1.5799,
      "step": 10
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00019441176470588235,
      "loss": 1.1614,
      "step": 20
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00019147058823529414,
      "loss": 0.8677,
      "step": 30
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00018882352941176472,
      "loss": 0.7303,
      "step": 40
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00018588235294117648,
      "loss": 0.4837,
      "step": 50
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00018294117647058825,
      "loss": 0.4625,
      "step": 60
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00018029411764705885,
      "loss": 0.3693,
      "step": 70
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.00017735294117647059,
      "loss": 0.331,
      "step": 80
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00017441176470588235,
      "loss": 0.2914,
      "step": 90
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.00017147058823529412,
      "loss": 0.2926,
      "step": 100
    },
    {
      "epoch": 2.94,
      "eval_accuracy": 0.943609022556391,
      "eval_loss": 0.15237735211849213,
      "eval_runtime": 1.9186,
      "eval_samples_per_second": 138.643,
      "eval_steps_per_second": 17.721,
      "step": 100
    },
    {
      "epoch": 3.24,
      "learning_rate": 0.00016852941176470588,
      "loss": 0.2343,
      "step": 110
    },
    {
      "epoch": 3.53,
      "learning_rate": 0.00016558823529411765,
      "loss": 0.2078,
      "step": 120
    },
    {
      "epoch": 3.82,
      "learning_rate": 0.0001626470588235294,
      "loss": 0.2448,
      "step": 130
    },
    {
      "epoch": 4.12,
      "learning_rate": 0.0001597058823529412,
      "loss": 0.2146,
      "step": 140
    },
    {
      "epoch": 4.41,
      "learning_rate": 0.00015676470588235294,
      "loss": 0.2086,
      "step": 150
    },
    {
      "epoch": 4.71,
      "learning_rate": 0.00015382352941176473,
      "loss": 0.1924,
      "step": 160
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.00015088235294117647,
      "loss": 0.0882,
      "step": 170
    },
    {
      "epoch": 5.29,
      "learning_rate": 0.00014794117647058826,
      "loss": 0.1726,
      "step": 180
    },
    {
      "epoch": 5.59,
      "learning_rate": 0.000145,
      "loss": 0.1193,
      "step": 190
    },
    {
      "epoch": 5.88,
      "learning_rate": 0.00014205882352941177,
      "loss": 0.1844,
      "step": 200
    },
    {
      "epoch": 5.88,
      "eval_accuracy": 0.943609022556391,
      "eval_loss": 0.1717682033777237,
      "eval_runtime": 2.2294,
      "eval_samples_per_second": 119.314,
      "eval_steps_per_second": 15.251,
      "step": 200
    },
    {
      "epoch": 6.18,
      "learning_rate": 0.00013911764705882353,
      "loss": 0.31,
      "step": 210
    },
    {
      "epoch": 6.47,
      "learning_rate": 0.0001361764705882353,
      "loss": 0.1387,
      "step": 220
    },
    {
      "epoch": 6.76,
      "learning_rate": 0.00013323529411764706,
      "loss": 0.2172,
      "step": 230
    },
    {
      "epoch": 7.06,
      "learning_rate": 0.00013029411764705883,
      "loss": 0.1062,
      "step": 240
    },
    {
      "epoch": 7.35,
      "learning_rate": 0.0001273529411764706,
      "loss": 0.1055,
      "step": 250
    },
    {
      "epoch": 7.65,
      "learning_rate": 0.00012441176470588236,
      "loss": 0.087,
      "step": 260
    },
    {
      "epoch": 7.94,
      "learning_rate": 0.00012147058823529412,
      "loss": 0.0817,
      "step": 270
    },
    {
      "epoch": 8.24,
      "learning_rate": 0.00011852941176470589,
      "loss": 0.0727,
      "step": 280
    },
    {
      "epoch": 8.53,
      "learning_rate": 0.00011558823529411764,
      "loss": 0.0561,
      "step": 290
    },
    {
      "epoch": 8.82,
      "learning_rate": 0.00011264705882352942,
      "loss": 0.1189,
      "step": 300
    },
    {
      "epoch": 8.82,
      "eval_accuracy": 0.9661654135338346,
      "eval_loss": 0.08863785862922668,
      "eval_runtime": 1.8695,
      "eval_samples_per_second": 142.285,
      "eval_steps_per_second": 18.187,
      "step": 300
    },
    {
      "epoch": 9.12,
      "learning_rate": 0.0001097058823529412,
      "loss": 0.0705,
      "step": 310
    },
    {
      "epoch": 9.41,
      "learning_rate": 0.00010676470588235295,
      "loss": 0.147,
      "step": 320
    },
    {
      "epoch": 9.71,
      "learning_rate": 0.00010382352941176472,
      "loss": 0.0612,
      "step": 330
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.00010088235294117648,
      "loss": 0.0926,
      "step": 340
    },
    {
      "epoch": 10.29,
      "learning_rate": 9.794117647058824e-05,
      "loss": 0.0977,
      "step": 350
    },
    {
      "epoch": 10.59,
      "learning_rate": 9.5e-05,
      "loss": 0.1104,
      "step": 360
    },
    {
      "epoch": 10.88,
      "learning_rate": 9.205882352941177e-05,
      "loss": 0.0834,
      "step": 370
    },
    {
      "epoch": 11.18,
      "learning_rate": 8.911764705882354e-05,
      "loss": 0.0831,
      "step": 380
    },
    {
      "epoch": 11.47,
      "learning_rate": 8.61764705882353e-05,
      "loss": 0.0464,
      "step": 390
    },
    {
      "epoch": 11.76,
      "learning_rate": 8.323529411764707e-05,
      "loss": 0.0682,
      "step": 400
    },
    {
      "epoch": 11.76,
      "eval_accuracy": 0.9699248120300752,
      "eval_loss": 0.09778111428022385,
      "eval_runtime": 2.3942,
      "eval_samples_per_second": 111.103,
      "eval_steps_per_second": 14.201,
      "step": 400
    },
    {
      "epoch": 12.06,
      "learning_rate": 8.029411764705883e-05,
      "loss": 0.0504,
      "step": 410
    },
    {
      "epoch": 12.35,
      "learning_rate": 7.73529411764706e-05,
      "loss": 0.0379,
      "step": 420
    },
    {
      "epoch": 12.65,
      "learning_rate": 7.441176470588236e-05,
      "loss": 0.0261,
      "step": 430
    },
    {
      "epoch": 12.94,
      "learning_rate": 7.147058823529412e-05,
      "loss": 0.0838,
      "step": 440
    },
    {
      "epoch": 13.24,
      "learning_rate": 6.852941176470589e-05,
      "loss": 0.0262,
      "step": 450
    },
    {
      "epoch": 13.53,
      "learning_rate": 6.558823529411765e-05,
      "loss": 0.0232,
      "step": 460
    },
    {
      "epoch": 13.82,
      "learning_rate": 6.264705882352942e-05,
      "loss": 0.0403,
      "step": 470
    },
    {
      "epoch": 14.12,
      "learning_rate": 5.970588235294118e-05,
      "loss": 0.049,
      "step": 480
    },
    {
      "epoch": 14.41,
      "learning_rate": 5.676470588235294e-05,
      "loss": 0.0606,
      "step": 490
    },
    {
      "epoch": 14.71,
      "learning_rate": 5.382352941176471e-05,
      "loss": 0.0439,
      "step": 500
    },
    {
      "epoch": 14.71,
      "eval_accuracy": 0.9736842105263158,
      "eval_loss": 0.08120405673980713,
      "eval_runtime": 1.897,
      "eval_samples_per_second": 140.22,
      "eval_steps_per_second": 17.923,
      "step": 500
    },
    {
      "epoch": 15.0,
      "learning_rate": 5.088235294117647e-05,
      "loss": 0.0495,
      "step": 510
    },
    {
      "epoch": 15.29,
      "learning_rate": 4.794117647058824e-05,
      "loss": 0.0337,
      "step": 520
    },
    {
      "epoch": 15.59,
      "learning_rate": 4.5e-05,
      "loss": 0.0987,
      "step": 530
    },
    {
      "epoch": 15.88,
      "learning_rate": 4.205882352941177e-05,
      "loss": 0.0218,
      "step": 540
    },
    {
      "epoch": 16.18,
      "learning_rate": 3.911764705882353e-05,
      "loss": 0.0324,
      "step": 550
    },
    {
      "epoch": 16.47,
      "learning_rate": 3.61764705882353e-05,
      "loss": 0.0364,
      "step": 560
    },
    {
      "epoch": 16.76,
      "learning_rate": 3.3235294117647056e-05,
      "loss": 0.0284,
      "step": 570
    },
    {
      "epoch": 17.06,
      "learning_rate": 3.0294117647058824e-05,
      "loss": 0.0306,
      "step": 580
    },
    {
      "epoch": 17.35,
      "learning_rate": 2.7352941176470593e-05,
      "loss": 0.0535,
      "step": 590
    },
    {
      "epoch": 17.65,
      "learning_rate": 2.4411764705882354e-05,
      "loss": 0.0544,
      "step": 600
    },
    {
      "epoch": 17.65,
      "eval_accuracy": 0.981203007518797,
      "eval_loss": 0.05951835587620735,
      "eval_runtime": 1.8559,
      "eval_samples_per_second": 143.327,
      "eval_steps_per_second": 18.32,
      "step": 600
    },
    {
      "epoch": 17.94,
      "learning_rate": 2.1470588235294116e-05,
      "loss": 0.0121,
      "step": 610
    },
    {
      "epoch": 18.24,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 0.0049,
      "step": 620
    },
    {
      "epoch": 18.53,
      "learning_rate": 1.558823529411765e-05,
      "loss": 0.0086,
      "step": 630
    },
    {
      "epoch": 18.82,
      "learning_rate": 1.2647058823529412e-05,
      "loss": 0.0102,
      "step": 640
    },
    {
      "epoch": 19.12,
      "learning_rate": 9.705882352941177e-06,
      "loss": 0.0342,
      "step": 650
    },
    {
      "epoch": 19.41,
      "learning_rate": 6.7647058823529414e-06,
      "loss": 0.0367,
      "step": 660
    },
    {
      "epoch": 19.71,
      "learning_rate": 3.823529411764706e-06,
      "loss": 0.0313,
      "step": 670
    },
    {
      "epoch": 20.0,
      "learning_rate": 8.823529411764706e-07,
      "loss": 0.0465,
      "step": 680
    },
    {
      "epoch": 20.0,
      "step": 680,
      "total_flos": 5.337848001643315e+17,
      "train_loss": 0.17142059330773704,
      "train_runtime": 289.6781,
      "train_samples_per_second": 73.323,
      "train_steps_per_second": 2.347
    }
  ],
  "logging_steps": 10,
  "max_steps": 680,
  "num_train_epochs": 20,
  "save_steps": 100,
  "total_flos": 5.337848001643315e+17,
  "trial_name": null,
  "trial_params": null
}