{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8567511994516792,
  "eval_steps": 100,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.993468091981985e-05,
      "loss": 0.6403,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 8.973891330515646e-05,
      "loss": 0.5248,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.941326548314407e-05,
      "loss": 0.5079,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.895868283228029e-05,
      "loss": 0.5916,
      "step": 40
    },
    {
      "epoch": 0.09,
      "learning_rate": 8.837648503792598e-05,
      "loss": 0.5116,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 8.766836226116608e-05,
      "loss": 0.5206,
      "step": 60
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.683637023215313e-05,
      "loss": 0.535,
      "step": 70
    },
    {
      "epoch": 0.14,
      "learning_rate": 8.588292428217834e-05,
      "loss": 0.6067,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 8.481079233179524e-05,
      "loss": 0.6139,
      "step": 90
    },
    {
      "epoch": 0.17,
      "learning_rate": 8.362308685535183e-05,
      "loss": 0.5015,
      "step": 100
    },
    {
      "epoch": 0.17,
      "eval_accuracy": 0.0,
      "eval_loss": 0.5284319519996643,
      "eval_runtime": 27.5165,
      "eval_samples_per_second": 141.406,
      "eval_steps_per_second": 23.586,
      "step": 100
    },
    {
      "epoch": 0.19,
      "learning_rate": 8.232325584525909e-05,
      "loss": 0.6008,
      "step": 110
    },
    {
      "epoch": 0.21,
      "learning_rate": 8.091507280222672e-05,
      "loss": 0.549,
      "step": 120
    },
    {
      "epoch": 0.22,
      "learning_rate": 7.940262578052567e-05,
      "loss": 0.5512,
      "step": 130
    },
    {
      "epoch": 0.24,
      "learning_rate": 7.779030552007946e-05,
      "loss": 0.4872,
      "step": 140
    },
    {
      "epoch": 0.26,
      "learning_rate": 7.608279269983776e-05,
      "loss": 0.505,
      "step": 150
    },
    {
      "epoch": 0.27,
      "learning_rate": 7.428504434943677e-05,
      "loss": 0.4774,
      "step": 160
    },
    {
      "epoch": 0.29,
      "learning_rate": 7.240227945859396e-05,
      "loss": 0.5005,
      "step": 170
    },
    {
      "epoch": 0.31,
      "learning_rate": 7.043996382601448e-05,
      "loss": 0.4286,
      "step": 180
    },
    {
      "epoch": 0.33,
      "learning_rate": 6.840379419179358e-05,
      "loss": 0.4746,
      "step": 190
    },
    {
      "epoch": 0.34,
      "learning_rate": 6.629968169938006e-05,
      "loss": 0.4259,
      "step": 200
    },
    {
      "epoch": 0.34,
      "eval_accuracy": 0.0,
      "eval_loss": 0.3847603499889374,
      "eval_runtime": 28.0384,
      "eval_samples_per_second": 138.774,
      "eval_steps_per_second": 23.147,
      "step": 200
    },
    {
      "epoch": 0.36,
      "learning_rate": 6.41337347351117e-05,
      "loss": 0.4118,
      "step": 210
    },
    {
      "epoch": 0.38,
      "learning_rate": 6.191224119514072e-05,
      "loss": 0.3758,
      "step": 220
    },
    {
      "epoch": 0.39,
      "learning_rate": 5.9641650231229666e-05,
      "loss": 0.4376,
      "step": 230
    },
    {
      "epoch": 0.41,
      "learning_rate": 5.7328553528410896e-05,
      "loss": 0.4189,
      "step": 240
    },
    {
      "epoch": 0.43,
      "learning_rate": 5.497966616886201e-05,
      "loss": 0.3686,
      "step": 250
    },
    {
      "epoch": 0.45,
      "learning_rate": 5.2601807137550664e-05,
      "loss": 0.377,
      "step": 260
    },
    {
      "epoch": 0.46,
      "learning_rate": 5.020187952624234e-05,
      "loss": 0.3708,
      "step": 270
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.7786850493340155e-05,
      "loss": 0.4092,
      "step": 280
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.536373103773464e-05,
      "loss": 0.3521,
      "step": 290
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.293955564538168e-05,
      "loss": 0.3808,
      "step": 300
    },
    {
      "epoch": 0.51,
      "eval_accuracy": 0.0,
      "eval_loss": 0.2961568832397461,
      "eval_runtime": 27.3269,
      "eval_samples_per_second": 142.387,
      "eval_steps_per_second": 23.749,
      "step": 300
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.052136186769563e-05,
      "loss": 0.3133,
      "step": 310
    },
    {
      "epoch": 0.55,
      "learning_rate": 3.811616989104347e-05,
      "loss": 0.3472,
      "step": 320
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.573096215665058e-05,
      "loss": 0.3731,
      "step": 330
    },
    {
      "epoch": 0.58,
      "learning_rate": 3.337266309008359e-05,
      "loss": 0.3228,
      "step": 340
    },
    {
      "epoch": 0.6,
      "learning_rate": 3.104811899915661e-05,
      "loss": 0.3134,
      "step": 350
    },
    {
      "epoch": 0.62,
      "learning_rate": 2.87640781986189e-05,
      "loss": 0.3279,
      "step": 360
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.652717141932312e-05,
      "loss": 0.3049,
      "step": 370
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.4343892558748025e-05,
      "loss": 0.2683,
      "step": 380
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.2220579828758074e-05,
      "loss": 0.2857,
      "step": 390
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.0163397355329138e-05,
      "loss": 0.3328,
      "step": 400
    },
    {
      "epoch": 0.69,
      "eval_accuracy": 0.0,
      "eval_loss": 0.25916174054145813,
      "eval_runtime": 28.4607,
      "eval_samples_per_second": 136.715,
      "eval_steps_per_second": 22.803,
      "step": 400
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.817831728365798e-05,
      "loss": 0.2938,
      "step": 410
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.627110244060532e-05,
      "loss": 0.2557,
      "step": 420
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.444728960480465e-05,
      "loss": 0.2855,
      "step": 430
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.2712173433005089e-05,
      "loss": 0.2658,
      "step": 440
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.107079108931101e-05,
      "loss": 0.2842,
      "step": 450
    },
    {
      "epoch": 0.79,
      "learning_rate": 9.527907621940789e-06,
      "loss": 0.2442,
      "step": 460
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.08800212995727e-06,
      "loss": 0.3001,
      "step": 470
    },
    {
      "epoch": 0.82,
      "learning_rate": 6.7552547601285726e-06,
      "loss": 0.2603,
      "step": 480
    },
    {
      "epoch": 0.84,
      "learning_rate": 5.533534571668464e-06,
      "loss": 0.2707,
      "step": 490
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.426388304085724e-06,
      "loss": 0.2086,
      "step": 500
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.0,
      "eval_loss": 0.24190260469913483,
      "eval_runtime": 27.2731,
      "eval_samples_per_second": 142.668,
      "eval_steps_per_second": 23.796,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 583,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}