{
  "best_metric": 0.0946127399802208,
  "best_model_checkpoint": "finetuned-Leukemia-cell/checkpoint-600",
  "epoch": 20.0,
  "eval_steps": 100,
  "global_step": 680,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.29,
      "learning_rate": 0.00019794117647058826,
      "loss": 1.9655,
      "step": 10
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.000195,
      "loss": 1.7836,
      "step": 20
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0001920588235294118,
      "loss": 1.5744,
      "step": 30
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00018911764705882353,
      "loss": 1.3903,
      "step": 40
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00018617647058823532,
      "loss": 1.3596,
      "step": 50
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00018323529411764706,
      "loss": 1.3915,
      "step": 60
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00018029411764705885,
      "loss": 1.2525,
      "step": 70
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.00017735294117647059,
      "loss": 1.167,
      "step": 80
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00017441176470588235,
      "loss": 1.0339,
      "step": 90
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.00017147058823529412,
      "loss": 0.9733,
      "step": 100
    },
    {
      "epoch": 2.94,
      "eval_accuracy": 0.7255639097744361,
      "eval_loss": 0.8894439339637756,
      "eval_runtime": 2.491,
      "eval_samples_per_second": 106.783,
      "eval_steps_per_second": 13.649,
      "step": 100
    },
    {
      "epoch": 3.24,
      "learning_rate": 0.00016852941176470588,
      "loss": 0.9561,
      "step": 110
    },
    {
      "epoch": 3.53,
      "learning_rate": 0.00016558823529411765,
      "loss": 0.9493,
      "step": 120
    },
    {
      "epoch": 3.82,
      "learning_rate": 0.0001626470588235294,
      "loss": 0.8258,
      "step": 130
    },
    {
      "epoch": 4.12,
      "learning_rate": 0.0001597058823529412,
      "loss": 0.8111,
      "step": 140
    },
    {
      "epoch": 4.41,
      "learning_rate": 0.00015676470588235294,
      "loss": 0.9361,
      "step": 150
    },
    {
      "epoch": 4.71,
      "learning_rate": 0.00015382352941176473,
      "loss": 0.7733,
      "step": 160
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.00015088235294117647,
      "loss": 0.5113,
      "step": 170
    },
    {
      "epoch": 5.29,
      "learning_rate": 0.00014794117647058826,
      "loss": 0.7088,
      "step": 180
    },
    {
      "epoch": 5.59,
      "learning_rate": 0.000145,
      "loss": 0.6652,
      "step": 190
    },
    {
      "epoch": 5.88,
      "learning_rate": 0.00014205882352941177,
      "loss": 0.7184,
      "step": 200
    },
    {
      "epoch": 5.88,
      "eval_accuracy": 0.7293233082706767,
      "eval_loss": 0.7875903248786926,
      "eval_runtime": 2.5205,
      "eval_samples_per_second": 105.533,
      "eval_steps_per_second": 13.489,
      "step": 200
    },
    {
      "epoch": 6.18,
      "learning_rate": 0.00013911764705882353,
      "loss": 0.9143,
      "step": 210
    },
    {
      "epoch": 6.47,
      "learning_rate": 0.0001361764705882353,
      "loss": 0.7709,
      "step": 220
    },
    {
      "epoch": 6.76,
      "learning_rate": 0.00013323529411764706,
      "loss": 0.5797,
      "step": 230
    },
    {
      "epoch": 7.06,
      "learning_rate": 0.00013029411764705883,
      "loss": 0.5424,
      "step": 240
    },
    {
      "epoch": 7.35,
      "learning_rate": 0.0001273529411764706,
      "loss": 0.6525,
      "step": 250
    },
    {
      "epoch": 7.65,
      "learning_rate": 0.00012441176470588236,
      "loss": 0.3516,
      "step": 260
    },
    {
      "epoch": 7.94,
      "learning_rate": 0.00012147058823529412,
      "loss": 0.4829,
      "step": 270
    },
    {
      "epoch": 8.24,
      "learning_rate": 0.00011852941176470589,
      "loss": 0.5154,
      "step": 280
    },
    {
      "epoch": 8.53,
      "learning_rate": 0.00011558823529411764,
      "loss": 0.3485,
      "step": 290
    },
    {
      "epoch": 8.82,
      "learning_rate": 0.00011264705882352942,
      "loss": 0.5299,
      "step": 300
    },
    {
      "epoch": 8.82,
      "eval_accuracy": 0.8609022556390977,
      "eval_loss": 0.5183172821998596,
      "eval_runtime": 2.8712,
      "eval_samples_per_second": 92.644,
      "eval_steps_per_second": 11.842,
      "step": 300
    },
    {
      "epoch": 9.12,
      "learning_rate": 0.0001097058823529412,
      "loss": 0.5263,
      "step": 310
    },
    {
      "epoch": 9.41,
      "learning_rate": 0.00010676470588235295,
      "loss": 0.4048,
      "step": 320
    },
    {
      "epoch": 9.71,
      "learning_rate": 0.00010382352941176472,
      "loss": 0.4386,
      "step": 330
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.00010088235294117648,
      "loss": 0.4728,
      "step": 340
    },
    {
      "epoch": 10.29,
      "learning_rate": 9.794117647058824e-05,
      "loss": 0.379,
      "step": 350
    },
    {
      "epoch": 10.59,
      "learning_rate": 9.5e-05,
      "loss": 0.4361,
      "step": 360
    },
    {
      "epoch": 10.88,
      "learning_rate": 9.205882352941177e-05,
      "loss": 0.2929,
      "step": 370
    },
    {
      "epoch": 11.18,
      "learning_rate": 8.911764705882354e-05,
      "loss": 0.2848,
      "step": 380
    },
    {
      "epoch": 11.47,
      "learning_rate": 8.61764705882353e-05,
      "loss": 0.4282,
      "step": 390
    },
    {
      "epoch": 11.76,
      "learning_rate": 8.323529411764707e-05,
      "loss": 0.3991,
      "step": 400
    },
    {
      "epoch": 11.76,
      "eval_accuracy": 0.8947368421052632,
      "eval_loss": 0.31206753849983215,
      "eval_runtime": 2.9634,
      "eval_samples_per_second": 89.761,
      "eval_steps_per_second": 11.473,
      "step": 400
    },
    {
      "epoch": 12.06,
      "learning_rate": 8.029411764705883e-05,
      "loss": 0.3552,
      "step": 410
    },
    {
      "epoch": 12.35,
      "learning_rate": 7.73529411764706e-05,
      "loss": 0.2886,
      "step": 420
    },
    {
      "epoch": 12.65,
      "learning_rate": 7.441176470588236e-05,
      "loss": 0.2809,
      "step": 430
    },
    {
      "epoch": 12.94,
      "learning_rate": 7.147058823529412e-05,
      "loss": 0.3627,
      "step": 440
    },
    {
      "epoch": 13.24,
      "learning_rate": 6.852941176470589e-05,
      "loss": 0.3137,
      "step": 450
    },
    {
      "epoch": 13.53,
      "learning_rate": 6.558823529411765e-05,
      "loss": 0.198,
      "step": 460
    },
    {
      "epoch": 13.82,
      "learning_rate": 6.264705882352942e-05,
      "loss": 0.3268,
      "step": 470
    },
    {
      "epoch": 14.12,
      "learning_rate": 5.970588235294118e-05,
      "loss": 0.256,
      "step": 480
    },
    {
      "epoch": 14.41,
      "learning_rate": 5.676470588235294e-05,
      "loss": 0.2544,
      "step": 490
    },
    {
      "epoch": 14.71,
      "learning_rate": 5.382352941176471e-05,
      "loss": 0.2263,
      "step": 500
    },
    {
      "epoch": 14.71,
      "eval_accuracy": 0.9548872180451128,
      "eval_loss": 0.13367173075675964,
      "eval_runtime": 2.4729,
      "eval_samples_per_second": 107.566,
      "eval_steps_per_second": 13.749,
      "step": 500
    },
    {
      "epoch": 15.0,
      "learning_rate": 5.088235294117647e-05,
      "loss": 0.4011,
      "step": 510
    },
    {
      "epoch": 15.29,
      "learning_rate": 4.794117647058824e-05,
      "loss": 0.2051,
      "step": 520
    },
    {
      "epoch": 15.59,
      "learning_rate": 4.5e-05,
      "loss": 0.2563,
      "step": 530
    },
    {
      "epoch": 15.88,
      "learning_rate": 4.205882352941177e-05,
      "loss": 0.2428,
      "step": 540
    },
    {
      "epoch": 16.18,
      "learning_rate": 3.911764705882353e-05,
      "loss": 0.189,
      "step": 550
    },
    {
      "epoch": 16.47,
      "learning_rate": 3.61764705882353e-05,
      "loss": 0.19,
      "step": 560
    },
    {
      "epoch": 16.76,
      "learning_rate": 3.3235294117647056e-05,
      "loss": 0.1696,
      "step": 570
    },
    {
      "epoch": 17.06,
      "learning_rate": 3.0294117647058824e-05,
      "loss": 0.1336,
      "step": 580
    },
    {
      "epoch": 17.35,
      "learning_rate": 2.7352941176470593e-05,
      "loss": 0.1532,
      "step": 590
    },
    {
      "epoch": 17.65,
      "learning_rate": 2.4411764705882354e-05,
      "loss": 0.1782,
      "step": 600
    },
    {
      "epoch": 17.65,
      "eval_accuracy": 0.9624060150375939,
      "eval_loss": 0.0946127399802208,
      "eval_runtime": 2.8201,
      "eval_samples_per_second": 94.322,
      "eval_steps_per_second": 12.056,
      "step": 600
    },
    {
      "epoch": 17.94,
      "learning_rate": 2.1470588235294116e-05,
      "loss": 0.1511,
      "step": 610
    },
    {
      "epoch": 18.24,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 0.0837,
      "step": 620
    },
    {
      "epoch": 18.53,
      "learning_rate": 1.558823529411765e-05,
      "loss": 0.1084,
      "step": 630
    },
    {
      "epoch": 18.82,
      "learning_rate": 1.2647058823529412e-05,
      "loss": 0.1313,
      "step": 640
    },
    {
      "epoch": 19.12,
      "learning_rate": 9.705882352941177e-06,
      "loss": 0.1135,
      "step": 650
    },
    {
      "epoch": 19.41,
      "learning_rate": 6.7647058823529414e-06,
      "loss": 0.0844,
      "step": 660
    },
    {
      "epoch": 19.71,
      "learning_rate": 3.823529411764706e-06,
      "loss": 0.1765,
      "step": 670
    },
    {
      "epoch": 20.0,
      "learning_rate": 8.823529411764706e-07,
      "loss": 0.1745,
      "step": 680
    },
    {
      "epoch": 20.0,
      "step": 680,
      "total_flos": 1.6434612963230515e+18,
      "train_loss": 0.5515072485103326,
      "train_runtime": 465.2023,
      "train_samples_per_second": 45.658,
      "train_steps_per_second": 1.462
    }
  ],
  "logging_steps": 10,
  "max_steps": 680,
  "num_train_epochs": 20,
  "save_steps": 100,
  "total_flos": 1.6434612963230515e+18,
  "trial_name": null,
  "trial_params": null
}