|
{ |
|
"best_metric": 0.46430692076683044, |
|
"best_model_checkpoint": "/home/datawork-iot-nos/Seatizen/models/multilabel/drone/drone-DinoVdeau-from-probs-large-2024_11_14-batch-size16_freeze_probs/checkpoint-36792", |
|
"epoch": 94.0, |
|
"eval_steps": 500, |
|
"global_step": 41172, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 1.0, |
|
"eval_explained_variance": 0.3296825587749481, |
|
"eval_kl_divergence": 0.9903448224067688, |
|
"eval_loss": 0.4933677613735199, |
|
"eval_mae": 0.129361093044281, |
|
"eval_rmse": 0.18249033391475677, |
|
"eval_runtime": 70.7501, |
|
"eval_samples_per_second": 33.272, |
|
"eval_steps_per_second": 2.092, |
|
"learning_rate": 0.001, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 1.1415525114155252, |
|
"grad_norm": 0.5407673120498657, |
|
"learning_rate": 0.001, |
|
"loss": 0.5313, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_explained_variance": 0.37307724356651306, |
|
"eval_kl_divergence": 0.6847184896469116, |
|
"eval_loss": 0.47886165976524353, |
|
"eval_mae": 0.12624560296535492, |
|
"eval_rmse": 0.1715712547302246, |
|
"eval_runtime": 71.8777, |
|
"eval_samples_per_second": 32.75, |
|
"eval_steps_per_second": 2.059, |
|
"learning_rate": 0.001, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 2.2831050228310503, |
|
"grad_norm": 0.43918001651763916, |
|
"learning_rate": 0.001, |
|
"loss": 0.4831, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_explained_variance": 0.38236939907073975, |
|
"eval_kl_divergence": 0.5498285293579102, |
|
"eval_loss": 0.4788369834423065, |
|
"eval_mae": 0.12710730731487274, |
|
"eval_rmse": 0.1709037572145462, |
|
"eval_runtime": 66.8689, |
|
"eval_samples_per_second": 35.203, |
|
"eval_steps_per_second": 2.213, |
|
"learning_rate": 0.001, |
|
"step": 1314 |
|
}, |
|
{ |
|
"epoch": 3.4246575342465753, |
|
"grad_norm": 0.28072813153266907, |
|
"learning_rate": 0.001, |
|
"loss": 0.4773, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_explained_variance": 0.39789319038391113, |
|
"eval_kl_divergence": 0.31306192278862, |
|
"eval_loss": 0.47656363248825073, |
|
"eval_mae": 0.12784750759601593, |
|
"eval_rmse": 0.16946662962436676, |
|
"eval_runtime": 68.7002, |
|
"eval_samples_per_second": 34.265, |
|
"eval_steps_per_second": 2.154, |
|
"learning_rate": 0.001, |
|
"step": 1752 |
|
}, |
|
{ |
|
"epoch": 4.566210045662101, |
|
"grad_norm": 0.23626942932605743, |
|
"learning_rate": 0.001, |
|
"loss": 0.476, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_explained_variance": 0.3970318138599396, |
|
"eval_kl_divergence": 0.40128740668296814, |
|
"eval_loss": 0.4765072166919708, |
|
"eval_mae": 0.12769053876399994, |
|
"eval_rmse": 0.1686621755361557, |
|
"eval_runtime": 69.4052, |
|
"eval_samples_per_second": 33.917, |
|
"eval_steps_per_second": 2.132, |
|
"learning_rate": 0.001, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 5.707762557077626, |
|
"grad_norm": 0.2325536459684372, |
|
"learning_rate": 0.001, |
|
"loss": 0.4746, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_explained_variance": 0.392393559217453, |
|
"eval_kl_divergence": 0.6369896531105042, |
|
"eval_loss": 0.47649845480918884, |
|
"eval_mae": 0.12429385632276535, |
|
"eval_rmse": 0.16892628371715546, |
|
"eval_runtime": 69.6508, |
|
"eval_samples_per_second": 33.797, |
|
"eval_steps_per_second": 2.125, |
|
"learning_rate": 0.001, |
|
"step": 2628 |
|
}, |
|
{ |
|
"epoch": 6.8493150684931505, |
|
"grad_norm": 0.1943524181842804, |
|
"learning_rate": 0.001, |
|
"loss": 0.4738, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_explained_variance": 0.3910720646381378, |
|
"eval_kl_divergence": 0.43142321705818176, |
|
"eval_loss": 0.47629299759864807, |
|
"eval_mae": 0.1291760504245758, |
|
"eval_rmse": 0.16937647759914398, |
|
"eval_runtime": 68.3166, |
|
"eval_samples_per_second": 34.457, |
|
"eval_steps_per_second": 2.166, |
|
"learning_rate": 0.001, |
|
"step": 3066 |
|
}, |
|
{ |
|
"epoch": 7.9908675799086755, |
|
"grad_norm": 0.161671444773674, |
|
"learning_rate": 0.001, |
|
"loss": 0.4727, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_explained_variance": 0.407635897397995, |
|
"eval_kl_divergence": 0.33790889382362366, |
|
"eval_loss": 0.4755041003227234, |
|
"eval_mae": 0.1266549676656723, |
|
"eval_rmse": 0.1681331843137741, |
|
"eval_runtime": 71.2607, |
|
"eval_samples_per_second": 33.034, |
|
"eval_steps_per_second": 2.077, |
|
"learning_rate": 0.001, |
|
"step": 3504 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_explained_variance": 0.40718480944633484, |
|
"eval_kl_divergence": 0.4916018843650818, |
|
"eval_loss": 0.47342246770858765, |
|
"eval_mae": 0.1250395029783249, |
|
"eval_rmse": 0.16618509590625763, |
|
"eval_runtime": 68.5459, |
|
"eval_samples_per_second": 34.342, |
|
"eval_steps_per_second": 2.159, |
|
"learning_rate": 0.001, |
|
"step": 3942 |
|
}, |
|
{ |
|
"epoch": 9.132420091324201, |
|
"grad_norm": 0.19357167184352875, |
|
"learning_rate": 0.001, |
|
"loss": 0.4715, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_explained_variance": 0.40620386600494385, |
|
"eval_kl_divergence": 0.33481186628341675, |
|
"eval_loss": 0.47546806931495667, |
|
"eval_mae": 0.1277414709329605, |
|
"eval_rmse": 0.167744979262352, |
|
"eval_runtime": 67.1085, |
|
"eval_samples_per_second": 35.078, |
|
"eval_steps_per_second": 2.205, |
|
"learning_rate": 0.001, |
|
"step": 4380 |
|
}, |
|
{ |
|
"epoch": 10.273972602739725, |
|
"grad_norm": 0.17573006451129913, |
|
"learning_rate": 0.001, |
|
"loss": 0.4714, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"eval_explained_variance": 0.41538166999816895, |
|
"eval_kl_divergence": 0.35237056016921997, |
|
"eval_loss": 0.4731104075908661, |
|
"eval_mae": 0.12549267709255219, |
|
"eval_rmse": 0.1659194976091385, |
|
"eval_runtime": 67.2129, |
|
"eval_samples_per_second": 35.023, |
|
"eval_steps_per_second": 2.202, |
|
"learning_rate": 0.001, |
|
"step": 4818 |
|
}, |
|
{ |
|
"epoch": 11.415525114155251, |
|
"grad_norm": 0.13790665566921234, |
|
"learning_rate": 0.001, |
|
"loss": 0.4713, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"eval_explained_variance": 0.41025033593177795, |
|
"eval_kl_divergence": 0.23825553059577942, |
|
"eval_loss": 0.47679492831230164, |
|
"eval_mae": 0.13060903549194336, |
|
"eval_rmse": 0.16900604963302612, |
|
"eval_runtime": 68.7453, |
|
"eval_samples_per_second": 34.242, |
|
"eval_steps_per_second": 2.153, |
|
"learning_rate": 0.001, |
|
"step": 5256 |
|
}, |
|
{ |
|
"epoch": 12.557077625570777, |
|
"grad_norm": 0.1497349739074707, |
|
"learning_rate": 0.001, |
|
"loss": 0.4722, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"eval_explained_variance": 0.40282678604125977, |
|
"eval_kl_divergence": 0.696779727935791, |
|
"eval_loss": 0.4736888110637665, |
|
"eval_mae": 0.12227921932935715, |
|
"eval_rmse": 0.16662709414958954, |
|
"eval_runtime": 66.9685, |
|
"eval_samples_per_second": 35.151, |
|
"eval_steps_per_second": 2.21, |
|
"learning_rate": 0.001, |
|
"step": 5694 |
|
}, |
|
{ |
|
"epoch": 13.698630136986301, |
|
"grad_norm": 0.11125459522008896, |
|
"learning_rate": 0.001, |
|
"loss": 0.472, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_explained_variance": 0.4099460542201996, |
|
"eval_kl_divergence": 0.3982764780521393, |
|
"eval_loss": 0.4736703634262085, |
|
"eval_mae": 0.12543360888957977, |
|
"eval_rmse": 0.16584907472133636, |
|
"eval_runtime": 70.7133, |
|
"eval_samples_per_second": 33.289, |
|
"eval_steps_per_second": 2.093, |
|
"learning_rate": 0.001, |
|
"step": 6132 |
|
}, |
|
{ |
|
"epoch": 14.840182648401827, |
|
"grad_norm": 0.13986219465732574, |
|
"learning_rate": 0.001, |
|
"loss": 0.4697, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_explained_variance": 0.40361204743385315, |
|
"eval_kl_divergence": 0.5619756579399109, |
|
"eval_loss": 0.4738818407058716, |
|
"eval_mae": 0.12480553239583969, |
|
"eval_rmse": 0.16644911468029022, |
|
"eval_runtime": 67.989, |
|
"eval_samples_per_second": 34.623, |
|
"eval_steps_per_second": 2.177, |
|
"learning_rate": 0.001, |
|
"step": 6570 |
|
}, |
|
{ |
|
"epoch": 15.981735159817351, |
|
"grad_norm": 0.10489701479673386, |
|
"learning_rate": 0.001, |
|
"loss": 0.4721, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_explained_variance": 0.41589656472206116, |
|
"eval_kl_divergence": 0.6049214601516724, |
|
"eval_loss": 0.47202879190444946, |
|
"eval_mae": 0.12306753545999527, |
|
"eval_rmse": 0.1648208647966385, |
|
"eval_runtime": 68.6318, |
|
"eval_samples_per_second": 34.299, |
|
"eval_steps_per_second": 2.156, |
|
"learning_rate": 0.001, |
|
"step": 7008 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"eval_explained_variance": 0.41705062985420227, |
|
"eval_kl_divergence": 0.3072437644004822, |
|
"eval_loss": 0.47406336665153503, |
|
"eval_mae": 0.12645679712295532, |
|
"eval_rmse": 0.16636626422405243, |
|
"eval_runtime": 67.693, |
|
"eval_samples_per_second": 34.775, |
|
"eval_steps_per_second": 2.186, |
|
"learning_rate": 0.001, |
|
"step": 7446 |
|
}, |
|
{ |
|
"epoch": 17.123287671232877, |
|
"grad_norm": 0.11510255187749863, |
|
"learning_rate": 0.001, |
|
"loss": 0.4709, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"eval_explained_variance": 0.4238598346710205, |
|
"eval_kl_divergence": 0.3350007236003876, |
|
"eval_loss": 0.4738321006298065, |
|
"eval_mae": 0.12525837123394012, |
|
"eval_rmse": 0.1649676412343979, |
|
"eval_runtime": 68.1862, |
|
"eval_samples_per_second": 34.523, |
|
"eval_steps_per_second": 2.171, |
|
"learning_rate": 0.001, |
|
"step": 7884 |
|
}, |
|
{ |
|
"epoch": 18.264840182648403, |
|
"grad_norm": 0.1282125860452652, |
|
"learning_rate": 0.001, |
|
"loss": 0.4711, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"eval_explained_variance": 0.4161747395992279, |
|
"eval_kl_divergence": 0.2745565176010132, |
|
"eval_loss": 0.476326048374176, |
|
"eval_mae": 0.12823572754859924, |
|
"eval_rmse": 0.16715575754642487, |
|
"eval_runtime": 68.1658, |
|
"eval_samples_per_second": 34.533, |
|
"eval_steps_per_second": 2.171, |
|
"learning_rate": 0.001, |
|
"step": 8322 |
|
}, |
|
{ |
|
"epoch": 19.40639269406393, |
|
"grad_norm": 0.15439559519290924, |
|
"learning_rate": 0.001, |
|
"loss": 0.4696, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_explained_variance": 0.40602585673332214, |
|
"eval_kl_divergence": 0.5658758878707886, |
|
"eval_loss": 0.4755523204803467, |
|
"eval_mae": 0.12445954233407974, |
|
"eval_rmse": 0.1669723242521286, |
|
"eval_runtime": 66.5326, |
|
"eval_samples_per_second": 35.381, |
|
"eval_steps_per_second": 2.224, |
|
"learning_rate": 0.001, |
|
"step": 8760 |
|
}, |
|
{ |
|
"epoch": 20.54794520547945, |
|
"grad_norm": 0.1288071572780609, |
|
"learning_rate": 0.001, |
|
"loss": 0.4715, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"eval_explained_variance": 0.40606603026390076, |
|
"eval_kl_divergence": 0.6153907179832458, |
|
"eval_loss": 0.47340598702430725, |
|
"eval_mae": 0.12303853780031204, |
|
"eval_rmse": 0.16621015965938568, |
|
"eval_runtime": 66.3328, |
|
"eval_samples_per_second": 35.488, |
|
"eval_steps_per_second": 2.231, |
|
"learning_rate": 0.001, |
|
"step": 9198 |
|
}, |
|
{ |
|
"epoch": 21.689497716894977, |
|
"grad_norm": 0.13872142136096954, |
|
"learning_rate": 0.001, |
|
"loss": 0.4714, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"eval_explained_variance": 0.402675986289978, |
|
"eval_kl_divergence": 0.7973779439926147, |
|
"eval_loss": 0.47443991899490356, |
|
"eval_mae": 0.12229780107736588, |
|
"eval_rmse": 0.16771160066127777, |
|
"eval_runtime": 65.4256, |
|
"eval_samples_per_second": 35.98, |
|
"eval_steps_per_second": 2.262, |
|
"learning_rate": 0.001, |
|
"step": 9636 |
|
}, |
|
{ |
|
"epoch": 22.831050228310502, |
|
"grad_norm": 0.12462610751390457, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4697, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"eval_explained_variance": 0.43371883034706116, |
|
"eval_kl_divergence": 0.2306971251964569, |
|
"eval_loss": 0.47205689549446106, |
|
"eval_mae": 0.12522436678409576, |
|
"eval_rmse": 0.16393429040908813, |
|
"eval_runtime": 67.2945, |
|
"eval_samples_per_second": 34.981, |
|
"eval_steps_per_second": 2.199, |
|
"learning_rate": 0.0001, |
|
"step": 10074 |
|
}, |
|
{ |
|
"epoch": 23.972602739726028, |
|
"grad_norm": 0.15716814994812012, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4653, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"eval_explained_variance": 0.43139490485191345, |
|
"eval_kl_divergence": 0.42188531160354614, |
|
"eval_loss": 0.4706146717071533, |
|
"eval_mae": 0.12165608257055283, |
|
"eval_rmse": 0.1630544662475586, |
|
"eval_runtime": 69.0079, |
|
"eval_samples_per_second": 34.112, |
|
"eval_steps_per_second": 2.145, |
|
"learning_rate": 0.0001, |
|
"step": 10512 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_explained_variance": 0.4370768964290619, |
|
"eval_kl_divergence": 0.5242214798927307, |
|
"eval_loss": 0.46876564621925354, |
|
"eval_mae": 0.11949952691793442, |
|
"eval_rmse": 0.16123180091381073, |
|
"eval_runtime": 67.8342, |
|
"eval_samples_per_second": 34.702, |
|
"eval_steps_per_second": 2.182, |
|
"learning_rate": 0.0001, |
|
"step": 10950 |
|
}, |
|
{ |
|
"epoch": 25.114155251141554, |
|
"grad_norm": 0.12071619927883148, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4665, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"eval_explained_variance": 0.43381467461586, |
|
"eval_kl_divergence": 0.6159161925315857, |
|
"eval_loss": 0.46925392746925354, |
|
"eval_mae": 0.11895165592432022, |
|
"eval_rmse": 0.16197769343852997, |
|
"eval_runtime": 66.3554, |
|
"eval_samples_per_second": 35.476, |
|
"eval_steps_per_second": 2.23, |
|
"learning_rate": 0.0001, |
|
"step": 11388 |
|
}, |
|
{ |
|
"epoch": 26.255707762557076, |
|
"grad_norm": 0.13487227261066437, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4638, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"eval_explained_variance": 0.4415856897830963, |
|
"eval_kl_divergence": 0.4046337604522705, |
|
"eval_loss": 0.46849024295806885, |
|
"eval_mae": 0.12057051062583923, |
|
"eval_rmse": 0.16070334613323212, |
|
"eval_runtime": 67.413, |
|
"eval_samples_per_second": 34.919, |
|
"eval_steps_per_second": 2.195, |
|
"learning_rate": 0.0001, |
|
"step": 11826 |
|
}, |
|
{ |
|
"epoch": 27.397260273972602, |
|
"grad_norm": 0.122219979763031, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4647, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"eval_explained_variance": 0.4443088471889496, |
|
"eval_kl_divergence": 0.28601282835006714, |
|
"eval_loss": 0.46939656138420105, |
|
"eval_mae": 0.12197214365005493, |
|
"eval_rmse": 0.16160771250724792, |
|
"eval_runtime": 68.24, |
|
"eval_samples_per_second": 34.496, |
|
"eval_steps_per_second": 2.169, |
|
"learning_rate": 0.0001, |
|
"step": 12264 |
|
}, |
|
{ |
|
"epoch": 28.538812785388128, |
|
"grad_norm": 0.1166076809167862, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4644, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"eval_explained_variance": 0.44008609652519226, |
|
"eval_kl_divergence": 0.42698290944099426, |
|
"eval_loss": 0.46892231702804565, |
|
"eval_mae": 0.11971781402826309, |
|
"eval_rmse": 0.16136477887630463, |
|
"eval_runtime": 67.0859, |
|
"eval_samples_per_second": 35.089, |
|
"eval_steps_per_second": 2.206, |
|
"learning_rate": 0.0001, |
|
"step": 12702 |
|
}, |
|
{ |
|
"epoch": 29.680365296803654, |
|
"grad_norm": 0.1203789934515953, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4638, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_explained_variance": 0.44359269738197327, |
|
"eval_kl_divergence": 0.2625378370285034, |
|
"eval_loss": 0.46987923979759216, |
|
"eval_mae": 0.12246056646108627, |
|
"eval_rmse": 0.16186781227588654, |
|
"eval_runtime": 67.1358, |
|
"eval_samples_per_second": 35.063, |
|
"eval_steps_per_second": 2.204, |
|
"learning_rate": 0.0001, |
|
"step": 13140 |
|
}, |
|
{ |
|
"epoch": 30.82191780821918, |
|
"grad_norm": 0.11070472747087479, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4636, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"eval_explained_variance": 0.44310399889945984, |
|
"eval_kl_divergence": 0.38758543133735657, |
|
"eval_loss": 0.46842578053474426, |
|
"eval_mae": 0.11974402517080307, |
|
"eval_rmse": 0.16068722307682037, |
|
"eval_runtime": 68.317, |
|
"eval_samples_per_second": 34.457, |
|
"eval_steps_per_second": 2.166, |
|
"learning_rate": 0.0001, |
|
"step": 13578 |
|
}, |
|
{ |
|
"epoch": 31.963470319634702, |
|
"grad_norm": 0.18743829429149628, |
|
"learning_rate": 0.0001, |
|
"loss": 0.463, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"eval_explained_variance": 0.4466596841812134, |
|
"eval_kl_divergence": 0.4060189425945282, |
|
"eval_loss": 0.46784707903862, |
|
"eval_mae": 0.11951278895139694, |
|
"eval_rmse": 0.16003534197807312, |
|
"eval_runtime": 66.0349, |
|
"eval_samples_per_second": 35.648, |
|
"eval_steps_per_second": 2.241, |
|
"learning_rate": 0.0001, |
|
"step": 14016 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"eval_explained_variance": 0.44936656951904297, |
|
"eval_kl_divergence": 0.3688030540943146, |
|
"eval_loss": 0.46755874156951904, |
|
"eval_mae": 0.11934500932693481, |
|
"eval_rmse": 0.15963692963123322, |
|
"eval_runtime": 72.5096, |
|
"eval_samples_per_second": 32.465, |
|
"eval_steps_per_second": 2.041, |
|
"learning_rate": 0.0001, |
|
"step": 14454 |
|
}, |
|
{ |
|
"epoch": 33.10502283105023, |
|
"grad_norm": 0.15971647202968597, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4628, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_explained_variance": 0.4491260051727295, |
|
"eval_kl_divergence": 0.3900001645088196, |
|
"eval_loss": 0.46766504645347595, |
|
"eval_mae": 0.11943826079368591, |
|
"eval_rmse": 0.15995797514915466, |
|
"eval_runtime": 67.335, |
|
"eval_samples_per_second": 34.96, |
|
"eval_steps_per_second": 2.198, |
|
"learning_rate": 0.0001, |
|
"step": 14892 |
|
}, |
|
{ |
|
"epoch": 34.24657534246575, |
|
"grad_norm": 0.15495429933071136, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4616, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_explained_variance": 0.44995394349098206, |
|
"eval_kl_divergence": 0.42819926142692566, |
|
"eval_loss": 0.4670174717903137, |
|
"eval_mae": 0.11885016411542892, |
|
"eval_rmse": 0.1593056619167328, |
|
"eval_runtime": 69.6729, |
|
"eval_samples_per_second": 33.786, |
|
"eval_steps_per_second": 2.124, |
|
"learning_rate": 0.0001, |
|
"step": 15330 |
|
}, |
|
{ |
|
"epoch": 35.38812785388128, |
|
"grad_norm": 0.1067986786365509, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4634, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"eval_explained_variance": 0.45064660906791687, |
|
"eval_kl_divergence": 0.44459816813468933, |
|
"eval_loss": 0.46679624915122986, |
|
"eval_mae": 0.11801303178071976, |
|
"eval_rmse": 0.15909221768379211, |
|
"eval_runtime": 70.3703, |
|
"eval_samples_per_second": 33.452, |
|
"eval_steps_per_second": 2.103, |
|
"learning_rate": 0.0001, |
|
"step": 15768 |
|
}, |
|
{ |
|
"epoch": 36.529680365296805, |
|
"grad_norm": 0.17229855060577393, |
|
"learning_rate": 0.0001, |
|
"loss": 0.462, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"eval_explained_variance": 0.4528321325778961, |
|
"eval_kl_divergence": 0.39424213767051697, |
|
"eval_loss": 0.46689239144325256, |
|
"eval_mae": 0.11852964758872986, |
|
"eval_rmse": 0.15902292728424072, |
|
"eval_runtime": 68.7832, |
|
"eval_samples_per_second": 34.223, |
|
"eval_steps_per_second": 2.152, |
|
"learning_rate": 0.0001, |
|
"step": 16206 |
|
}, |
|
{ |
|
"epoch": 37.67123287671233, |
|
"grad_norm": 0.16489605605602264, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4631, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"eval_explained_variance": 0.4512017071247101, |
|
"eval_kl_divergence": 0.47831401228904724, |
|
"eval_loss": 0.4664570987224579, |
|
"eval_mae": 0.11769810318946838, |
|
"eval_rmse": 0.15879899263381958, |
|
"eval_runtime": 69.0328, |
|
"eval_samples_per_second": 34.1, |
|
"eval_steps_per_second": 2.144, |
|
"learning_rate": 0.0001, |
|
"step": 16644 |
|
}, |
|
{ |
|
"epoch": 38.81278538812786, |
|
"grad_norm": 0.24003809690475464, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4603, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"eval_explained_variance": 0.4500379264354706, |
|
"eval_kl_divergence": 0.386750727891922, |
|
"eval_loss": 0.4674011468887329, |
|
"eval_mae": 0.11898898333311081, |
|
"eval_rmse": 0.15971434116363525, |
|
"eval_runtime": 69.047, |
|
"eval_samples_per_second": 34.093, |
|
"eval_steps_per_second": 2.143, |
|
"learning_rate": 0.0001, |
|
"step": 17082 |
|
}, |
|
{ |
|
"epoch": 39.954337899543376, |
|
"grad_norm": 0.1894679218530655, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4614, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_explained_variance": 0.4498337507247925, |
|
"eval_kl_divergence": 0.3627259135246277, |
|
"eval_loss": 0.4677062928676605, |
|
"eval_mae": 0.11947864294052124, |
|
"eval_rmse": 0.15986855328083038, |
|
"eval_runtime": 68.3668, |
|
"eval_samples_per_second": 34.432, |
|
"eval_steps_per_second": 2.165, |
|
"learning_rate": 0.0001, |
|
"step": 17520 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"eval_explained_variance": 0.4539731740951538, |
|
"eval_kl_divergence": 0.2654862701892853, |
|
"eval_loss": 0.46822381019592285, |
|
"eval_mae": 0.12105310708284378, |
|
"eval_rmse": 0.16022303700447083, |
|
"eval_runtime": 68.0126, |
|
"eval_samples_per_second": 34.611, |
|
"eval_steps_per_second": 2.176, |
|
"learning_rate": 0.0001, |
|
"step": 17958 |
|
}, |
|
{ |
|
"epoch": 41.0958904109589, |
|
"grad_norm": 0.13239748775959015, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4612, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"eval_explained_variance": 0.45140552520751953, |
|
"eval_kl_divergence": 0.5071607828140259, |
|
"eval_loss": 0.4664672613143921, |
|
"eval_mae": 0.11723508685827255, |
|
"eval_rmse": 0.15891815721988678, |
|
"eval_runtime": 66.3853, |
|
"eval_samples_per_second": 35.46, |
|
"eval_steps_per_second": 2.229, |
|
"learning_rate": 0.0001, |
|
"step": 18396 |
|
}, |
|
{ |
|
"epoch": 42.23744292237443, |
|
"grad_norm": 0.17189915478229523, |
|
"learning_rate": 0.0001, |
|
"loss": 0.462, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"eval_explained_variance": 0.4554661214351654, |
|
"eval_kl_divergence": 0.430560827255249, |
|
"eval_loss": 0.46638762950897217, |
|
"eval_mae": 0.11766376346349716, |
|
"eval_rmse": 0.15850575268268585, |
|
"eval_runtime": 65.8594, |
|
"eval_samples_per_second": 35.743, |
|
"eval_steps_per_second": 2.247, |
|
"learning_rate": 0.0001, |
|
"step": 18834 |
|
}, |
|
{ |
|
"epoch": 43.37899543378995, |
|
"grad_norm": 0.1566254049539566, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4603, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_explained_variance": 0.45294663310050964, |
|
"eval_kl_divergence": 0.4114503562450409, |
|
"eval_loss": 0.46708741784095764, |
|
"eval_mae": 0.11923228949308395, |
|
"eval_rmse": 0.15942519903182983, |
|
"eval_runtime": 70.2132, |
|
"eval_samples_per_second": 33.526, |
|
"eval_steps_per_second": 2.108, |
|
"learning_rate": 0.0001, |
|
"step": 19272 |
|
}, |
|
{ |
|
"epoch": 44.52054794520548, |
|
"grad_norm": 0.1487286388874054, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4599, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_explained_variance": 0.45276129245758057, |
|
"eval_kl_divergence": 0.441719114780426, |
|
"eval_loss": 0.46663177013397217, |
|
"eval_mae": 0.11713916063308716, |
|
"eval_rmse": 0.15901675820350647, |
|
"eval_runtime": 67.4478, |
|
"eval_samples_per_second": 34.901, |
|
"eval_steps_per_second": 2.194, |
|
"learning_rate": 0.0001, |
|
"step": 19710 |
|
}, |
|
{ |
|
"epoch": 45.662100456621005, |
|
"grad_norm": 0.17072182893753052, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4612, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"eval_explained_variance": 0.4574218690395355, |
|
"eval_kl_divergence": 0.3686365485191345, |
|
"eval_loss": 0.4663327634334564, |
|
"eval_mae": 0.11786039918661118, |
|
"eval_rmse": 0.15854045748710632, |
|
"eval_runtime": 66.0464, |
|
"eval_samples_per_second": 35.642, |
|
"eval_steps_per_second": 2.241, |
|
"learning_rate": 0.0001, |
|
"step": 20148 |
|
}, |
|
{ |
|
"epoch": 46.80365296803653, |
|
"grad_norm": 0.17141404747962952, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4596, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"eval_explained_variance": 0.4567171633243561, |
|
"eval_kl_divergence": 0.5089961886405945, |
|
"eval_loss": 0.46577510237693787, |
|
"eval_mae": 0.11723583936691284, |
|
"eval_rmse": 0.15815743803977966, |
|
"eval_runtime": 68.7513, |
|
"eval_samples_per_second": 34.239, |
|
"eval_steps_per_second": 2.153, |
|
"learning_rate": 0.0001, |
|
"step": 20586 |
|
}, |
|
{ |
|
"epoch": 47.945205479452056, |
|
"grad_norm": 0.13870340585708618, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4603, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"eval_explained_variance": 0.4547780752182007, |
|
"eval_kl_divergence": 0.5279051065444946, |
|
"eval_loss": 0.46634000539779663, |
|
"eval_mae": 0.11751776933670044, |
|
"eval_rmse": 0.15890929102897644, |
|
"eval_runtime": 66.5777, |
|
"eval_samples_per_second": 35.357, |
|
"eval_steps_per_second": 2.223, |
|
"learning_rate": 0.0001, |
|
"step": 21024 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"eval_explained_variance": 0.4532240927219391, |
|
"eval_kl_divergence": 0.44965481758117676, |
|
"eval_loss": 0.46656596660614014, |
|
"eval_mae": 0.11831226199865341, |
|
"eval_rmse": 0.15911053121089935, |
|
"eval_runtime": 67.2369, |
|
"eval_samples_per_second": 35.011, |
|
"eval_steps_per_second": 2.201, |
|
"learning_rate": 0.0001, |
|
"step": 21462 |
|
}, |
|
{ |
|
"epoch": 49.08675799086758, |
|
"grad_norm": 0.16372357308864594, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4599, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_explained_variance": 0.45799562335014343, |
|
"eval_kl_divergence": 0.27116483449935913, |
|
"eval_loss": 0.46755433082580566, |
|
"eval_mae": 0.1204712763428688, |
|
"eval_rmse": 0.15946535766124725, |
|
"eval_runtime": 67.5446, |
|
"eval_samples_per_second": 34.851, |
|
"eval_steps_per_second": 2.191, |
|
"learning_rate": 0.0001, |
|
"step": 21900 |
|
}, |
|
{ |
|
"epoch": 50.22831050228311, |
|
"grad_norm": 0.15486378967761993, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4594, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"eval_explained_variance": 0.4551831781864166, |
|
"eval_kl_divergence": 0.4008127450942993, |
|
"eval_loss": 0.46639156341552734, |
|
"eval_mae": 0.11721646040678024, |
|
"eval_rmse": 0.15861637890338898, |
|
"eval_runtime": 64.0368, |
|
"eval_samples_per_second": 36.76, |
|
"eval_steps_per_second": 2.311, |
|
"learning_rate": 0.0001, |
|
"step": 22338 |
|
}, |
|
{ |
|
"epoch": 51.36986301369863, |
|
"grad_norm": 0.2131696492433548, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4593, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"eval_explained_variance": 0.4556889832019806, |
|
"eval_kl_divergence": 0.4921821057796478, |
|
"eval_loss": 0.46591076254844666, |
|
"eval_mae": 0.11625734716653824, |
|
"eval_rmse": 0.1582706868648529, |
|
"eval_runtime": 68.9844, |
|
"eval_samples_per_second": 34.124, |
|
"eval_steps_per_second": 2.145, |
|
"learning_rate": 0.0001, |
|
"step": 22776 |
|
}, |
|
{ |
|
"epoch": 52.51141552511415, |
|
"grad_norm": 0.3151676654815674, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4614, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"eval_explained_variance": 0.4601232707500458, |
|
"eval_kl_divergence": 0.4273536205291748, |
|
"eval_loss": 0.4656851887702942, |
|
"eval_mae": 0.1177864745259285, |
|
"eval_rmse": 0.15793031454086304, |
|
"eval_runtime": 68.0941, |
|
"eval_samples_per_second": 34.57, |
|
"eval_steps_per_second": 2.173, |
|
"learning_rate": 0.0001, |
|
"step": 23214 |
|
}, |
|
{ |
|
"epoch": 53.65296803652968, |
|
"grad_norm": 0.1899169534444809, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4592, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"eval_explained_variance": 0.456957072019577, |
|
"eval_kl_divergence": 0.4574394226074219, |
|
"eval_loss": 0.46629655361175537, |
|
"eval_mae": 0.11577478051185608, |
|
"eval_rmse": 0.15850554406642914, |
|
"eval_runtime": 65.2277, |
|
"eval_samples_per_second": 36.089, |
|
"eval_steps_per_second": 2.269, |
|
"learning_rate": 0.0001, |
|
"step": 23652 |
|
}, |
|
{ |
|
"epoch": 54.794520547945204, |
|
"grad_norm": 0.27214938402175903, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4601, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_explained_variance": 0.45804765820503235, |
|
"eval_kl_divergence": 0.34863969683647156, |
|
"eval_loss": 0.46644341945648193, |
|
"eval_mae": 0.11888903379440308, |
|
"eval_rmse": 0.1585645228624344, |
|
"eval_runtime": 62.989, |
|
"eval_samples_per_second": 37.372, |
|
"eval_steps_per_second": 2.35, |
|
"learning_rate": 0.0001, |
|
"step": 24090 |
|
}, |
|
{ |
|
"epoch": 55.93607305936073, |
|
"grad_norm": 0.32561782002449036, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4589, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"eval_explained_variance": 0.46123751997947693, |
|
"eval_kl_divergence": 0.3015596568584442, |
|
"eval_loss": 0.4661739766597748, |
|
"eval_mae": 0.1184321939945221, |
|
"eval_rmse": 0.15838663280010223, |
|
"eval_runtime": 62.9298, |
|
"eval_samples_per_second": 37.407, |
|
"eval_steps_per_second": 2.352, |
|
"learning_rate": 0.0001, |
|
"step": 24528 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"eval_explained_variance": 0.45534956455230713, |
|
"eval_kl_divergence": 0.4162982106208801, |
|
"eval_loss": 0.46634721755981445, |
|
"eval_mae": 0.11808174103498459, |
|
"eval_rmse": 0.15866883099079132, |
|
"eval_runtime": 63.3411, |
|
"eval_samples_per_second": 37.164, |
|
"eval_steps_per_second": 2.337, |
|
"learning_rate": 0.0001, |
|
"step": 24966 |
|
}, |
|
{ |
|
"epoch": 57.077625570776256, |
|
"grad_norm": 0.21059946715831757, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4588, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"eval_explained_variance": 0.4557063579559326, |
|
"eval_kl_divergence": 0.3399294316768646, |
|
"eval_loss": 0.4673805236816406, |
|
"eval_mae": 0.11887022852897644, |
|
"eval_rmse": 0.15934236347675323, |
|
"eval_runtime": 65.9311, |
|
"eval_samples_per_second": 35.704, |
|
"eval_steps_per_second": 2.245, |
|
"learning_rate": 0.0001, |
|
"step": 25404 |
|
}, |
|
{ |
|
"epoch": 58.21917808219178, |
|
"grad_norm": 0.1977730244398117, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4595, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"eval_explained_variance": 0.4646029472351074, |
|
"eval_kl_divergence": 0.3685876429080963, |
|
"eval_loss": 0.4650005102157593, |
|
"eval_mae": 0.11696922034025192, |
|
"eval_rmse": 0.15720517933368683, |
|
"eval_runtime": 63.6041, |
|
"eval_samples_per_second": 37.01, |
|
"eval_steps_per_second": 2.327, |
|
"learning_rate": 0.0001, |
|
"step": 25842 |
|
}, |
|
{ |
|
"epoch": 59.36073059360731, |
|
"grad_norm": 0.22561948001384735, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4594, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_explained_variance": 0.4566896855831146, |
|
"eval_kl_divergence": 0.45347684621810913, |
|
"eval_loss": 0.46599113941192627, |
|
"eval_mae": 0.11718535423278809, |
|
"eval_rmse": 0.15840259194374084, |
|
"eval_runtime": 62.901, |
|
"eval_samples_per_second": 37.424, |
|
"eval_steps_per_second": 2.353, |
|
"learning_rate": 0.0001, |
|
"step": 26280 |
|
}, |
|
{ |
|
"epoch": 60.50228310502283, |
|
"grad_norm": 0.19251562654972076, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4599, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"eval_explained_variance": 0.4583926200866699, |
|
"eval_kl_divergence": 0.375108540058136, |
|
"eval_loss": 0.4662201702594757, |
|
"eval_mae": 0.11791958659887314, |
|
"eval_rmse": 0.1585090458393097, |
|
"eval_runtime": 61.9039, |
|
"eval_samples_per_second": 38.027, |
|
"eval_steps_per_second": 2.391, |
|
"learning_rate": 0.0001, |
|
"step": 26718 |
|
}, |
|
{ |
|
"epoch": 61.64383561643836, |
|
"grad_norm": 0.2111298143863678, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4584, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"eval_explained_variance": 0.4587836265563965, |
|
"eval_kl_divergence": 0.35335177183151245, |
|
"eval_loss": 0.46614503860473633, |
|
"eval_mae": 0.11732319742441177, |
|
"eval_rmse": 0.15833334624767303, |
|
"eval_runtime": 63.6491, |
|
"eval_samples_per_second": 36.984, |
|
"eval_steps_per_second": 2.325, |
|
"learning_rate": 0.0001, |
|
"step": 27156 |
|
}, |
|
{ |
|
"epoch": 62.78538812785388, |
|
"grad_norm": 0.21951091289520264, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4575, |
|
"step": 27500 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"eval_explained_variance": 0.4576869010925293, |
|
"eval_kl_divergence": 0.40475067496299744, |
|
"eval_loss": 0.4660026431083679, |
|
"eval_mae": 0.11627298593521118, |
|
"eval_rmse": 0.1582755148410797, |
|
"eval_runtime": 62.5657, |
|
"eval_samples_per_second": 37.624, |
|
"eval_steps_per_second": 2.366, |
|
"learning_rate": 0.0001, |
|
"step": 27594 |
|
}, |
|
{ |
|
"epoch": 63.926940639269404, |
|
"grad_norm": 0.21228961646556854, |
|
"learning_rate": 0.0001, |
|
"loss": 0.4598, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"eval_explained_variance": 0.46285080909729004, |
|
"eval_kl_divergence": 0.2471023052930832, |
|
"eval_loss": 0.46711620688438416, |
|
"eval_mae": 0.11878780275583267, |
|
"eval_rmse": 0.15883709490299225, |
|
"eval_runtime": 62.9069, |
|
"eval_samples_per_second": 37.42, |
|
"eval_steps_per_second": 2.353, |
|
"learning_rate": 0.0001, |
|
"step": 28032 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_explained_variance": 0.46037113666534424, |
|
"eval_kl_divergence": 0.4526395797729492, |
|
"eval_loss": 0.46536803245544434, |
|
"eval_mae": 0.11657585948705673, |
|
"eval_rmse": 0.15768831968307495, |
|
"eval_runtime": 65.6932, |
|
"eval_samples_per_second": 35.833, |
|
"eval_steps_per_second": 2.253, |
|
"learning_rate": 0.0001, |
|
"step": 28470 |
|
}, |
|
{ |
|
"epoch": 65.06849315068493, |
|
"grad_norm": 0.23406238853931427, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4582, |
|
"step": 28500 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"eval_explained_variance": 0.45918479561805725, |
|
"eval_kl_divergence": 0.5259271264076233, |
|
"eval_loss": 0.46573594212532043, |
|
"eval_mae": 0.11610028892755508, |
|
"eval_rmse": 0.15816400945186615, |
|
"eval_runtime": 63.8294, |
|
"eval_samples_per_second": 36.88, |
|
"eval_steps_per_second": 2.319, |
|
"learning_rate": 1e-05, |
|
"step": 28908 |
|
}, |
|
{ |
|
"epoch": 66.21004566210046, |
|
"grad_norm": 0.20290181040763855, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4592, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"eval_explained_variance": 0.4623439311981201, |
|
"eval_kl_divergence": 0.4251798987388611, |
|
"eval_loss": 0.4653942584991455, |
|
"eval_mae": 0.11725304275751114, |
|
"eval_rmse": 0.1573910117149353, |
|
"eval_runtime": 65.929, |
|
"eval_samples_per_second": 35.705, |
|
"eval_steps_per_second": 2.245, |
|
"learning_rate": 1e-05, |
|
"step": 29346 |
|
}, |
|
{ |
|
"epoch": 67.35159817351598, |
|
"grad_norm": 0.210421621799469, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4573, |
|
"step": 29500 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"eval_explained_variance": 0.46140334010124207, |
|
"eval_kl_divergence": 0.49885210394859314, |
|
"eval_loss": 0.46487176418304443, |
|
"eval_mae": 0.11542203277349472, |
|
"eval_rmse": 0.15722759068012238, |
|
"eval_runtime": 63.8681, |
|
"eval_samples_per_second": 36.857, |
|
"eval_steps_per_second": 2.317, |
|
"learning_rate": 1e-05, |
|
"step": 29784 |
|
}, |
|
{ |
|
"epoch": 68.4931506849315, |
|
"grad_norm": 0.3686019480228424, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4576, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"eval_explained_variance": 0.46442678570747375, |
|
"eval_kl_divergence": 0.40228068828582764, |
|
"eval_loss": 0.465110719203949, |
|
"eval_mae": 0.11607488244771957, |
|
"eval_rmse": 0.15701350569725037, |
|
"eval_runtime": 68.8094, |
|
"eval_samples_per_second": 34.21, |
|
"eval_steps_per_second": 2.151, |
|
"learning_rate": 1e-05, |
|
"step": 30222 |
|
}, |
|
{ |
|
"epoch": 69.63470319634703, |
|
"grad_norm": 0.3072531223297119, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4556, |
|
"step": 30500 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_explained_variance": 0.4622003734111786, |
|
"eval_kl_divergence": 0.41175633668899536, |
|
"eval_loss": 0.466043084859848, |
|
"eval_mae": 0.1165715679526329, |
|
"eval_rmse": 0.15760619938373566, |
|
"eval_runtime": 67.6756, |
|
"eval_samples_per_second": 34.784, |
|
"eval_steps_per_second": 2.187, |
|
"learning_rate": 1e-05, |
|
"step": 30660 |
|
}, |
|
{ |
|
"epoch": 70.77625570776256, |
|
"grad_norm": 0.2429497092962265, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4591, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"eval_explained_variance": 0.4643818140029907, |
|
"eval_kl_divergence": 0.3074641227722168, |
|
"eval_loss": 0.46608996391296387, |
|
"eval_mae": 0.11772569268941879, |
|
"eval_rmse": 0.15784674882888794, |
|
"eval_runtime": 72.04, |
|
"eval_samples_per_second": 32.676, |
|
"eval_steps_per_second": 2.054, |
|
"learning_rate": 1e-05, |
|
"step": 31098 |
|
}, |
|
{ |
|
"epoch": 71.91780821917808, |
|
"grad_norm": 0.1753670871257782, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4563, |
|
"step": 31500 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"eval_explained_variance": 0.46214058995246887, |
|
"eval_kl_divergence": 0.38358867168426514, |
|
"eval_loss": 0.46582332253456116, |
|
"eval_mae": 0.11707664281129837, |
|
"eval_rmse": 0.1579793244600296, |
|
"eval_runtime": 66.3861, |
|
"eval_samples_per_second": 35.459, |
|
"eval_steps_per_second": 2.229, |
|
"learning_rate": 1e-05, |
|
"step": 31536 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"eval_explained_variance": 0.464040070772171, |
|
"eval_kl_divergence": 0.45435720682144165, |
|
"eval_loss": 0.4648771584033966, |
|
"eval_mae": 0.11544410139322281, |
|
"eval_rmse": 0.15694700181484222, |
|
"eval_runtime": 64.6485, |
|
"eval_samples_per_second": 36.412, |
|
"eval_steps_per_second": 2.289, |
|
"learning_rate": 1e-05, |
|
"step": 31974 |
|
}, |
|
{ |
|
"epoch": 73.05936073059361, |
|
"grad_norm": 0.2539682686328888, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4577, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"eval_explained_variance": 0.46597161889076233, |
|
"eval_kl_divergence": 0.4537515938282013, |
|
"eval_loss": 0.46466848254203796, |
|
"eval_mae": 0.11628029495477676, |
|
"eval_rmse": 0.1567227691411972, |
|
"eval_runtime": 64.3001, |
|
"eval_samples_per_second": 36.61, |
|
"eval_steps_per_second": 2.302, |
|
"learning_rate": 1e-05, |
|
"step": 32412 |
|
}, |
|
{ |
|
"epoch": 74.20091324200914, |
|
"grad_norm": 0.19458268582820892, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4576, |
|
"step": 32500 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_explained_variance": 0.46580255031585693, |
|
"eval_kl_divergence": 0.33480265736579895, |
|
"eval_loss": 0.46563470363616943, |
|
"eval_mae": 0.11664699763059616, |
|
"eval_rmse": 0.1572960466146469, |
|
"eval_runtime": 65.0705, |
|
"eval_samples_per_second": 36.176, |
|
"eval_steps_per_second": 2.274, |
|
"learning_rate": 1e-05, |
|
"step": 32850 |
|
}, |
|
{ |
|
"epoch": 75.34246575342466, |
|
"grad_norm": 0.23031042516231537, |
|
"learning_rate": 1e-05, |
|
"loss": 0.457, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"eval_explained_variance": 0.4644790589809418, |
|
"eval_kl_divergence": 0.49758121371269226, |
|
"eval_loss": 0.4647076725959778, |
|
"eval_mae": 0.11578535288572311, |
|
"eval_rmse": 0.15706199407577515, |
|
"eval_runtime": 64.4752, |
|
"eval_samples_per_second": 36.51, |
|
"eval_steps_per_second": 2.295, |
|
"learning_rate": 1e-05, |
|
"step": 33288 |
|
}, |
|
{ |
|
"epoch": 76.48401826484019, |
|
"grad_norm": 0.21923455595970154, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4574, |
|
"step": 33500 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"eval_explained_variance": 0.46530094742774963, |
|
"eval_kl_divergence": 0.3934381902217865, |
|
"eval_loss": 0.4650570750236511, |
|
"eval_mae": 0.11632921546697617, |
|
"eval_rmse": 0.15704087913036346, |
|
"eval_runtime": 65.0363, |
|
"eval_samples_per_second": 36.195, |
|
"eval_steps_per_second": 2.276, |
|
"learning_rate": 1e-05, |
|
"step": 33726 |
|
}, |
|
{ |
|
"epoch": 77.62557077625571, |
|
"grad_norm": 0.27394065260887146, |
|
"learning_rate": 1e-05, |
|
"loss": 0.457, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"eval_explained_variance": 0.4654146730899811, |
|
"eval_kl_divergence": 0.39355090260505676, |
|
"eval_loss": 0.46495845913887024, |
|
"eval_mae": 0.11605050414800644, |
|
"eval_rmse": 0.15708379447460175, |
|
"eval_runtime": 64.2371, |
|
"eval_samples_per_second": 36.645, |
|
"eval_steps_per_second": 2.304, |
|
"learning_rate": 1e-05, |
|
"step": 34164 |
|
}, |
|
{ |
|
"epoch": 78.76712328767124, |
|
"grad_norm": 0.20854905247688293, |
|
"learning_rate": 1e-05, |
|
"loss": 0.4566, |
|
"step": 34500 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"eval_explained_variance": 0.4653468132019043, |
|
"eval_kl_divergence": 0.37590739130973816, |
|
"eval_loss": 0.46530288457870483, |
|
"eval_mae": 0.11588699370622635, |
|
"eval_rmse": 0.15725918114185333, |
|
"eval_runtime": 62.4443, |
|
"eval_samples_per_second": 37.698, |
|
"eval_steps_per_second": 2.37, |
|
"learning_rate": 1e-05, |
|
"step": 34602 |
|
}, |
|
{ |
|
"epoch": 79.90867579908675, |
|
"grad_norm": 0.21241584420204163, |
|
"learning_rate": 1e-05, |
|
"loss": 0.458, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_explained_variance": 0.46603894233703613, |
|
"eval_kl_divergence": 0.41887620091438293, |
|
"eval_loss": 0.4647064805030823, |
|
"eval_mae": 0.11623198539018631, |
|
"eval_rmse": 0.15669378638267517, |
|
"eval_runtime": 62.0038, |
|
"eval_samples_per_second": 37.965, |
|
"eval_steps_per_second": 2.387, |
|
"learning_rate": 1e-05, |
|
"step": 35040 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"eval_explained_variance": 0.4646754562854767, |
|
"eval_kl_divergence": 0.47510233521461487, |
|
"eval_loss": 0.46485888957977295, |
|
"eval_mae": 0.11576662212610245, |
|
"eval_rmse": 0.15710778534412384, |
|
"eval_runtime": 61.3344, |
|
"eval_samples_per_second": 38.38, |
|
"eval_steps_per_second": 2.413, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 35478 |
|
}, |
|
{ |
|
"epoch": 81.05022831050228, |
|
"grad_norm": 0.23253624141216278, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.456, |
|
"step": 35500 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"eval_explained_variance": 0.4651080071926117, |
|
"eval_kl_divergence": 0.43354716897010803, |
|
"eval_loss": 0.4654049277305603, |
|
"eval_mae": 0.11612025648355484, |
|
"eval_rmse": 0.1572476029396057, |
|
"eval_runtime": 62.2235, |
|
"eval_samples_per_second": 37.831, |
|
"eval_steps_per_second": 2.379, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 35916 |
|
}, |
|
{ |
|
"epoch": 82.1917808219178, |
|
"grad_norm": 0.23126010596752167, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4564, |
|
"step": 36000 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"eval_explained_variance": 0.46666058897972107, |
|
"eval_kl_divergence": 0.3906138837337494, |
|
"eval_loss": 0.46466442942619324, |
|
"eval_mae": 0.11605874449014664, |
|
"eval_rmse": 0.15662842988967896, |
|
"eval_runtime": 61.3486, |
|
"eval_samples_per_second": 38.371, |
|
"eval_steps_per_second": 2.412, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 36354 |
|
}, |
|
{ |
|
"epoch": 83.33333333333333, |
|
"grad_norm": 0.2322608381509781, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4575, |
|
"step": 36500 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"eval_explained_variance": 0.46770432591438293, |
|
"eval_kl_divergence": 0.3855327367782593, |
|
"eval_loss": 0.46430692076683044, |
|
"eval_mae": 0.11573296785354614, |
|
"eval_rmse": 0.15642325580120087, |
|
"eval_runtime": 66.0549, |
|
"eval_samples_per_second": 35.637, |
|
"eval_steps_per_second": 2.241, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 36792 |
|
}, |
|
{ |
|
"epoch": 84.47488584474885, |
|
"grad_norm": 0.23277735710144043, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4557, |
|
"step": 37000 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_explained_variance": 0.46685031056404114, |
|
"eval_kl_divergence": 0.3372012972831726, |
|
"eval_loss": 0.46528080105781555, |
|
"eval_mae": 0.11732091754674911, |
|
"eval_rmse": 0.1570904403924942, |
|
"eval_runtime": 64.5003, |
|
"eval_samples_per_second": 36.496, |
|
"eval_steps_per_second": 2.295, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 37230 |
|
}, |
|
{ |
|
"epoch": 85.61643835616438, |
|
"grad_norm": 0.40314728021621704, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4587, |
|
"step": 37500 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"eval_explained_variance": 0.46858659386634827, |
|
"eval_kl_divergence": 0.29690617322921753, |
|
"eval_loss": 0.46546733379364014, |
|
"eval_mae": 0.11843166500329971, |
|
"eval_rmse": 0.15721069276332855, |
|
"eval_runtime": 63.2115, |
|
"eval_samples_per_second": 37.24, |
|
"eval_steps_per_second": 2.341, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 37668 |
|
}, |
|
{ |
|
"epoch": 86.7579908675799, |
|
"grad_norm": 0.2404189556837082, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4564, |
|
"step": 38000 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"eval_explained_variance": 0.4670089781284332, |
|
"eval_kl_divergence": 0.3571859300136566, |
|
"eval_loss": 0.4651782214641571, |
|
"eval_mae": 0.11727560311555862, |
|
"eval_rmse": 0.15713386237621307, |
|
"eval_runtime": 63.9279, |
|
"eval_samples_per_second": 36.823, |
|
"eval_steps_per_second": 2.315, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 38106 |
|
}, |
|
{ |
|
"epoch": 87.89954337899543, |
|
"grad_norm": 0.27116990089416504, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4565, |
|
"step": 38500 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"eval_explained_variance": 0.4626586139202118, |
|
"eval_kl_divergence": 0.5179023146629333, |
|
"eval_loss": 0.4655611217021942, |
|
"eval_mae": 0.11513540893793106, |
|
"eval_rmse": 0.15777353942394257, |
|
"eval_runtime": 64.6969, |
|
"eval_samples_per_second": 36.385, |
|
"eval_steps_per_second": 2.288, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 38544 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"eval_explained_variance": 0.4670461118221283, |
|
"eval_kl_divergence": 0.29475125670433044, |
|
"eval_loss": 0.4654468297958374, |
|
"eval_mae": 0.11768680065870285, |
|
"eval_rmse": 0.15736806392669678, |
|
"eval_runtime": 64.3025, |
|
"eval_samples_per_second": 36.608, |
|
"eval_steps_per_second": 2.302, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 38982 |
|
}, |
|
{ |
|
"epoch": 89.04109589041096, |
|
"grad_norm": 0.35173681378364563, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 0.4569, |
|
"step": 39000 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_explained_variance": 0.46744585037231445, |
|
"eval_kl_divergence": 0.34272825717926025, |
|
"eval_loss": 0.4649873971939087, |
|
"eval_mae": 0.11670031398534775, |
|
"eval_rmse": 0.15687011182308197, |
|
"eval_runtime": 63.1964, |
|
"eval_samples_per_second": 37.249, |
|
"eval_steps_per_second": 2.342, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"step": 39420 |
|
}, |
|
{ |
|
"epoch": 90.18264840182648, |
|
"grad_norm": 0.27881479263305664, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"loss": 0.4561, |
|
"step": 39500 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"eval_explained_variance": 0.4690658748149872, |
|
"eval_kl_divergence": 0.27898427844047546, |
|
"eval_loss": 0.4655340015888214, |
|
"eval_mae": 0.11734825372695923, |
|
"eval_rmse": 0.15720801055431366, |
|
"eval_runtime": 62.9781, |
|
"eval_samples_per_second": 37.378, |
|
"eval_steps_per_second": 2.35, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"step": 39858 |
|
}, |
|
{ |
|
"epoch": 91.32420091324201, |
|
"grad_norm": 0.18466131389141083, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"loss": 0.4575, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"eval_explained_variance": 0.4671539068222046, |
|
"eval_kl_divergence": 0.41531136631965637, |
|
"eval_loss": 0.4645555317401886, |
|
"eval_mae": 0.11530810594558716, |
|
"eval_rmse": 0.15662522614002228, |
|
"eval_runtime": 65.5509, |
|
"eval_samples_per_second": 35.911, |
|
"eval_steps_per_second": 2.258, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"step": 40296 |
|
}, |
|
{ |
|
"epoch": 92.46575342465754, |
|
"grad_norm": 0.18533748388290405, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"loss": 0.4569, |
|
"step": 40500 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"eval_explained_variance": 0.464511513710022, |
|
"eval_kl_divergence": 0.46641936898231506, |
|
"eval_loss": 0.4648568034172058, |
|
"eval_mae": 0.11531826853752136, |
|
"eval_rmse": 0.1570696085691452, |
|
"eval_runtime": 64.6542, |
|
"eval_samples_per_second": 36.409, |
|
"eval_steps_per_second": 2.289, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"step": 40734 |
|
}, |
|
{ |
|
"epoch": 93.60730593607306, |
|
"grad_norm": 0.27212274074554443, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"loss": 0.456, |
|
"step": 41000 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"eval_explained_variance": 0.4661947786808014, |
|
"eval_kl_divergence": 0.3858942687511444, |
|
"eval_loss": 0.4652610421180725, |
|
"eval_mae": 0.11592459678649902, |
|
"eval_rmse": 0.15684206783771515, |
|
"eval_runtime": 63.3717, |
|
"eval_samples_per_second": 37.146, |
|
"eval_steps_per_second": 2.335, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"step": 41172 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.0000000000000002e-07, |
|
"step": 41172, |
|
"total_flos": 9.743166860733663e+19, |
|
"train_loss": 0.46343856458701926, |
|
"train_runtime": 26810.1674, |
|
"train_samples_per_second": 39.181, |
|
"train_steps_per_second": 2.451 |
|
} |
|
], |
|
"logging_steps": 500, |
|
"max_steps": 65700, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 150, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 10, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 0 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 9.743166860733663e+19, |
|
"train_batch_size": 16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |