{
  "best_metric": 1.050220012664795,
  "best_model_checkpoint": "/kaggle/output/checkpoint-27000",
  "epoch": 1.1000651890482398,
  "eval_steps": 1000,
  "global_step": 27000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.7777777777777777e-11,
      "loss": 1.1065,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.7750000000000004e-08,
      "loss": 1.1586,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "eval_accuracy": 0.32954091816367265,
      "eval_loss": 1.1065887212753296,
      "eval_runtime": 54.4372,
      "eval_samples_per_second": 92.033,
      "eval_steps_per_second": 11.518,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 5.5527777777777784e-08,
      "loss": 1.1259,
      "step": 2000
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.3435129740518962,
      "eval_loss": 1.1045255661010742,
      "eval_runtime": 54.458,
      "eval_samples_per_second": 91.997,
      "eval_steps_per_second": 11.513,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.327777777777778e-08,
      "loss": 1.1229,
      "step": 3000
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.3441117764471058,
      "eval_loss": 1.1050777435302734,
      "eval_runtime": 54.349,
      "eval_samples_per_second": 92.182,
      "eval_steps_per_second": 11.537,
      "step": 3000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.1105555555555557e-07,
      "loss": 1.1237,
      "step": 4000
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.3411177644710579,
      "eval_loss": 1.1015312671661377,
      "eval_runtime": 54.4388,
      "eval_samples_per_second": 92.03,
      "eval_steps_per_second": 11.518,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.3883333333333335e-07,
      "loss": 1.1202,
      "step": 5000
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.3405189620758483,
      "eval_loss": 1.1006112098693848,
      "eval_runtime": 54.4109,
      "eval_samples_per_second": 92.077,
      "eval_steps_per_second": 11.523,
      "step": 5000
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.6658333333333335e-07,
      "loss": 1.1182,
      "step": 6000
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.35489021956087824,
      "eval_loss": 1.0982259511947632,
      "eval_runtime": 54.3841,
      "eval_samples_per_second": 92.123,
      "eval_steps_per_second": 11.529,
      "step": 6000
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9436111111111112e-07,
      "loss": 1.1125,
      "step": 7000
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.35748502994011977,
      "eval_loss": 1.0954252481460571,
      "eval_runtime": 54.3975,
      "eval_samples_per_second": 92.1,
      "eval_steps_per_second": 11.526,
      "step": 7000
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.2213888888888891e-07,
      "loss": 1.1096,
      "step": 8000
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.3664670658682635,
      "eval_loss": 1.0949230194091797,
      "eval_runtime": 54.3263,
      "eval_samples_per_second": 92.221,
      "eval_steps_per_second": 11.541,
      "step": 8000
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.4988888888888893e-07,
      "loss": 1.1057,
      "step": 9000
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.34890219560878244,
      "eval_loss": 1.1037237644195557,
      "eval_runtime": 54.3657,
      "eval_samples_per_second": 92.154,
      "eval_steps_per_second": 11.533,
      "step": 9000
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.776666666666667e-07,
      "loss": 1.1056,
      "step": 10000
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.3838323353293413,
      "eval_loss": 1.0928014516830444,
      "eval_runtime": 54.6518,
      "eval_samples_per_second": 91.671,
      "eval_steps_per_second": 11.473,
      "step": 10000
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.054444444444444e-07,
      "loss": 1.1036,
      "step": 11000
    },
    {
      "epoch": 0.45,
      "eval_accuracy": 0.3838323353293413,
      "eval_loss": 1.0908663272857666,
      "eval_runtime": 54.579,
      "eval_samples_per_second": 91.794,
      "eval_steps_per_second": 11.488,
      "step": 11000
    },
    {
      "epoch": 0.49,
      "learning_rate": 3.3322222222222225e-07,
      "loss": 1.1012,
      "step": 12000
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.3872255489021956,
      "eval_loss": 1.0890672206878662,
      "eval_runtime": 54.5139,
      "eval_samples_per_second": 91.903,
      "eval_steps_per_second": 11.502,
      "step": 12000
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.609722222222222e-07,
      "loss": 1.103,
      "step": 13000
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.34311377245508984,
      "eval_loss": 1.0942790508270264,
      "eval_runtime": 54.3951,
      "eval_samples_per_second": 92.104,
      "eval_steps_per_second": 11.527,
      "step": 13000
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.8875e-07,
      "loss": 1.0997,
      "step": 14000
    },
    {
      "epoch": 0.57,
      "eval_accuracy": 0.3712574850299401,
      "eval_loss": 1.0931994915008545,
      "eval_runtime": 54.3749,
      "eval_samples_per_second": 92.138,
      "eval_steps_per_second": 11.531,
      "step": 14000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.1650000000000006e-07,
      "loss": 1.0993,
      "step": 15000
    },
    {
      "epoch": 0.61,
      "eval_accuracy": 0.3872255489021956,
      "eval_loss": 1.0887969732284546,
      "eval_runtime": 54.4149,
      "eval_samples_per_second": 92.07,
      "eval_steps_per_second": 11.523,
      "step": 15000
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.4427777777777783e-07,
      "loss": 1.0983,
      "step": 16000
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.3980039920159681,
      "eval_loss": 1.0836474895477295,
      "eval_runtime": 54.3692,
      "eval_samples_per_second": 92.148,
      "eval_steps_per_second": 11.532,
      "step": 16000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.7202777777777785e-07,
      "loss": 1.0954,
      "step": 17000
    },
    {
      "epoch": 0.69,
      "eval_accuracy": 0.3996007984031936,
      "eval_loss": 1.0822749137878418,
      "eval_runtime": 54.3313,
      "eval_samples_per_second": 92.212,
      "eval_steps_per_second": 11.54,
      "step": 17000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.998055555555556e-07,
      "loss": 1.097,
      "step": 18000
    },
    {
      "epoch": 0.73,
      "eval_accuracy": 0.4033932135728543,
      "eval_loss": 1.0845097303390503,
      "eval_runtime": 54.3565,
      "eval_samples_per_second": 92.169,
      "eval_steps_per_second": 11.535,
      "step": 18000
    },
    {
      "epoch": 0.77,
      "learning_rate": 5.275555555555556e-07,
      "loss": 1.0942,
      "step": 19000
    },
    {
      "epoch": 0.77,
      "eval_accuracy": 0.4039920159680639,
      "eval_loss": 1.0809950828552246,
      "eval_runtime": 54.3973,
      "eval_samples_per_second": 92.1,
      "eval_steps_per_second": 11.526,
      "step": 19000
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.553333333333334e-07,
      "loss": 1.0921,
      "step": 20000
    },
    {
      "epoch": 0.81,
      "eval_accuracy": 0.4055888223552894,
      "eval_loss": 1.0818568468093872,
      "eval_runtime": 54.8395,
      "eval_samples_per_second": 91.358,
      "eval_steps_per_second": 11.433,
      "step": 20000
    },
    {
      "epoch": 0.86,
      "learning_rate": 5.830833333333334e-07,
      "loss": 1.0914,
      "step": 21000
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.38163672654690617,
      "eval_loss": 1.08183753490448,
      "eval_runtime": 54.4076,
      "eval_samples_per_second": 92.083,
      "eval_steps_per_second": 11.524,
      "step": 21000
    },
    {
      "epoch": 0.9,
      "learning_rate": 6.108611111111111e-07,
      "loss": 1.091,
      "step": 22000
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.40658682634730536,
      "eval_loss": 1.0757951736450195,
      "eval_runtime": 54.4113,
      "eval_samples_per_second": 92.077,
      "eval_steps_per_second": 11.523,
      "step": 22000
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.386388888888889e-07,
      "loss": 1.0856,
      "step": 23000
    },
    {
      "epoch": 0.94,
      "eval_accuracy": 0.43313373253493015,
      "eval_loss": 1.0659974813461304,
      "eval_runtime": 54.3958,
      "eval_samples_per_second": 92.103,
      "eval_steps_per_second": 11.527,
      "step": 23000
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.663888888888889e-07,
      "loss": 1.0794,
      "step": 24000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.43952095808383235,
      "eval_loss": 1.056550145149231,
      "eval_runtime": 54.6408,
      "eval_samples_per_second": 91.69,
      "eval_steps_per_second": 11.475,
      "step": 24000
    },
    {
      "epoch": 1.02,
      "learning_rate": 6.941666666666667e-07,
      "loss": 1.0714,
      "step": 25000
    },
    {
      "epoch": 1.02,
      "eval_accuracy": 0.4411177644710579,
      "eval_loss": 1.0531866550445557,
      "eval_runtime": 54.5122,
      "eval_samples_per_second": 91.906,
      "eval_steps_per_second": 11.502,
      "step": 25000
    },
    {
      "epoch": 1.06,
      "learning_rate": 7.219166666666666e-07,
      "loss": 1.0703,
      "step": 26000
    },
    {
      "epoch": 1.06,
      "eval_accuracy": 0.43273453093812375,
      "eval_loss": 1.0532232522964478,
      "eval_runtime": 54.5098,
      "eval_samples_per_second": 91.91,
      "eval_steps_per_second": 11.503,
      "step": 26000
    },
    {
      "epoch": 1.1,
      "learning_rate": 7.496944444444444e-07,
      "loss": 1.0682,
      "step": 27000
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.4353293413173653,
      "eval_loss": 1.050220012664795,
      "eval_runtime": 54.3702,
      "eval_samples_per_second": 92.146,
      "eval_steps_per_second": 11.532,
      "step": 27000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 10000000,
  "num_train_epochs": 408,
  "save_steps": 1000,
  "total_flos": 5.64393272255447e+16,
  "trial_name": null,
  "trial_params": null
}