GBERT-BioM-Translation-large / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 73198,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.966324216508648e-05,
"loss": 1.3516,
"step": 500
},
{
"epoch": 0.03,
"learning_rate": 4.93223858575371e-05,
"loss": 1.194,
"step": 1000
},
{
"epoch": 0.04,
"learning_rate": 4.8980846471215063e-05,
"loss": 1.1341,
"step": 1500
},
{
"epoch": 0.05,
"learning_rate": 4.863930708489303e-05,
"loss": 1.0893,
"step": 2000
},
{
"epoch": 0.07,
"learning_rate": 4.8297767698571e-05,
"loss": 1.0658,
"step": 2500
},
{
"epoch": 0.08,
"learning_rate": 4.795622831224897e-05,
"loss": 1.0419,
"step": 3000
},
{
"epoch": 0.1,
"learning_rate": 4.7615372004699584e-05,
"loss": 1.0196,
"step": 3500
},
{
"epoch": 0.11,
"learning_rate": 4.727383261837756e-05,
"loss": 1.0027,
"step": 4000
},
{
"epoch": 0.12,
"learning_rate": 4.6932293232055523e-05,
"loss": 0.9885,
"step": 4500
},
{
"epoch": 0.14,
"learning_rate": 4.659143692450614e-05,
"loss": 0.9824,
"step": 5000
},
{
"epoch": 0.15,
"learning_rate": 4.6249897538184105e-05,
"loss": 0.964,
"step": 5500
},
{
"epoch": 0.16,
"learning_rate": 4.590835815186207e-05,
"loss": 0.9533,
"step": 6000
},
{
"epoch": 0.18,
"learning_rate": 4.5566818765540044e-05,
"loss": 0.9401,
"step": 6500
},
{
"epoch": 0.19,
"learning_rate": 4.522527937921802e-05,
"loss": 0.9334,
"step": 7000
},
{
"epoch": 0.2,
"learning_rate": 4.4883739992895983e-05,
"loss": 0.923,
"step": 7500
},
{
"epoch": 0.22,
"learning_rate": 4.454220060657395e-05,
"loss": 0.9165,
"step": 8000
},
{
"epoch": 0.23,
"learning_rate": 4.420066122025192e-05,
"loss": 0.9078,
"step": 8500
},
{
"epoch": 0.25,
"learning_rate": 4.385912183392989e-05,
"loss": 0.9012,
"step": 9000
},
{
"epoch": 0.26,
"learning_rate": 4.3517582447607855e-05,
"loss": 0.8927,
"step": 9500
},
{
"epoch": 0.27,
"learning_rate": 4.3176043061285835e-05,
"loss": 0.8878,
"step": 10000
},
{
"epoch": 0.29,
"learning_rate": 4.28345036749638e-05,
"loss": 0.8819,
"step": 10500
},
{
"epoch": 0.3,
"learning_rate": 4.249296428864177e-05,
"loss": 0.8744,
"step": 11000
},
{
"epoch": 0.31,
"learning_rate": 4.215142490231973e-05,
"loss": 0.8733,
"step": 11500
},
{
"epoch": 0.33,
"learning_rate": 4.1809885515997706e-05,
"loss": 0.8747,
"step": 12000
},
{
"epoch": 0.34,
"learning_rate": 4.1469029208448315e-05,
"loss": 0.8709,
"step": 12500
},
{
"epoch": 0.36,
"learning_rate": 4.1127489822126295e-05,
"loss": 0.8651,
"step": 13000
},
{
"epoch": 0.37,
"learning_rate": 4.0786633514576903e-05,
"loss": 0.8592,
"step": 13500
},
{
"epoch": 0.38,
"learning_rate": 4.0445094128254876e-05,
"loss": 0.8526,
"step": 14000
},
{
"epoch": 0.4,
"learning_rate": 4.010355474193284e-05,
"loss": 0.8501,
"step": 14500
},
{
"epoch": 0.41,
"learning_rate": 3.976201535561081e-05,
"loss": 0.848,
"step": 15000
},
{
"epoch": 0.42,
"learning_rate": 3.9421159048061424e-05,
"loss": 0.8442,
"step": 15500
},
{
"epoch": 0.44,
"learning_rate": 3.907961966173939e-05,
"loss": 0.839,
"step": 16000
},
{
"epoch": 0.45,
"learning_rate": 3.8738080275417363e-05,
"loss": 0.8408,
"step": 16500
},
{
"epoch": 0.46,
"learning_rate": 3.8396540889095336e-05,
"loss": 0.8349,
"step": 17000
},
{
"epoch": 0.48,
"learning_rate": 3.80550015027733e-05,
"loss": 0.8324,
"step": 17500
},
{
"epoch": 0.49,
"learning_rate": 3.771346211645127e-05,
"loss": 0.8305,
"step": 18000
},
{
"epoch": 0.51,
"learning_rate": 3.737192273012924e-05,
"loss": 0.8276,
"step": 18500
},
{
"epoch": 0.52,
"learning_rate": 3.7030383343807215e-05,
"loss": 0.8235,
"step": 19000
},
{
"epoch": 0.53,
"learning_rate": 3.668884395748518e-05,
"loss": 0.8206,
"step": 19500
},
{
"epoch": 0.55,
"learning_rate": 3.634730457116315e-05,
"loss": 0.8225,
"step": 20000
},
{
"epoch": 0.56,
"learning_rate": 3.600576518484112e-05,
"loss": 0.8162,
"step": 20500
},
{
"epoch": 0.57,
"learning_rate": 3.5664225798519086e-05,
"loss": 0.8179,
"step": 21000
},
{
"epoch": 0.59,
"learning_rate": 3.532268641219705e-05,
"loss": 0.8136,
"step": 21500
},
{
"epoch": 0.6,
"learning_rate": 3.4981147025875025e-05,
"loss": 0.8137,
"step": 22000
},
{
"epoch": 0.61,
"learning_rate": 3.4639607639553e-05,
"loss": 0.8061,
"step": 22500
},
{
"epoch": 0.63,
"learning_rate": 3.4298068253230964e-05,
"loss": 0.807,
"step": 23000
},
{
"epoch": 0.64,
"learning_rate": 3.395652886690893e-05,
"loss": 0.8053,
"step": 23500
},
{
"epoch": 0.66,
"learning_rate": 3.36149894805869e-05,
"loss": 0.8002,
"step": 24000
},
{
"epoch": 0.67,
"learning_rate": 3.3273450094264876e-05,
"loss": 0.7994,
"step": 24500
},
{
"epoch": 0.68,
"learning_rate": 3.293191070794284e-05,
"loss": 0.7987,
"step": 25000
},
{
"epoch": 0.7,
"learning_rate": 3.259037132162081e-05,
"loss": 0.7956,
"step": 25500
},
{
"epoch": 0.71,
"learning_rate": 3.224883193529878e-05,
"loss": 0.7939,
"step": 26000
},
{
"epoch": 0.72,
"learning_rate": 3.190729254897675e-05,
"loss": 0.791,
"step": 26500
},
{
"epoch": 0.74,
"learning_rate": 3.156575316265472e-05,
"loss": 0.7904,
"step": 27000
},
{
"epoch": 0.75,
"learning_rate": 3.1224213776332687e-05,
"loss": 0.7879,
"step": 27500
},
{
"epoch": 0.77,
"learning_rate": 3.088267439001066e-05,
"loss": 0.786,
"step": 28000
},
{
"epoch": 0.78,
"learning_rate": 3.0541135003688626e-05,
"loss": 0.786,
"step": 28500
},
{
"epoch": 0.79,
"learning_rate": 3.0199595617366595e-05,
"loss": 0.7827,
"step": 29000
},
{
"epoch": 0.81,
"learning_rate": 2.9858056231044568e-05,
"loss": 0.7829,
"step": 29500
},
{
"epoch": 0.82,
"learning_rate": 2.9516516844722538e-05,
"loss": 0.7805,
"step": 30000
},
{
"epoch": 0.83,
"learning_rate": 2.9174977458400504e-05,
"loss": 0.7795,
"step": 30500
},
{
"epoch": 0.85,
"learning_rate": 2.8834121150851116e-05,
"loss": 0.7776,
"step": 31000
},
{
"epoch": 0.86,
"learning_rate": 2.8492581764529086e-05,
"loss": 0.775,
"step": 31500
},
{
"epoch": 0.87,
"learning_rate": 2.8151725456979698e-05,
"loss": 0.7732,
"step": 32000
},
{
"epoch": 0.89,
"learning_rate": 2.7810186070657668e-05,
"loss": 0.7745,
"step": 32500
},
{
"epoch": 0.9,
"learning_rate": 2.746864668433564e-05,
"loss": 0.7746,
"step": 33000
},
{
"epoch": 0.92,
"learning_rate": 2.712710729801361e-05,
"loss": 0.7717,
"step": 33500
},
{
"epoch": 0.93,
"learning_rate": 2.6786250990464222e-05,
"loss": 0.77,
"step": 34000
},
{
"epoch": 0.94,
"learning_rate": 2.6444711604142192e-05,
"loss": 0.7667,
"step": 34500
},
{
"epoch": 0.96,
"learning_rate": 2.610317221782016e-05,
"loss": 0.7684,
"step": 35000
},
{
"epoch": 0.97,
"learning_rate": 2.5761632831498128e-05,
"loss": 0.7643,
"step": 35500
},
{
"epoch": 0.98,
"learning_rate": 2.5422142681494032e-05,
"loss": 0.7638,
"step": 36000
},
{
"epoch": 1.0,
"learning_rate": 2.5080603295172002e-05,
"loss": 0.7651,
"step": 36500
},
{
"epoch": 1.01,
"learning_rate": 2.4739063908849968e-05,
"loss": 0.7578,
"step": 37000
},
{
"epoch": 1.02,
"learning_rate": 2.439752452252794e-05,
"loss": 0.7578,
"step": 37500
},
{
"epoch": 1.04,
"learning_rate": 2.4055985136205907e-05,
"loss": 0.7576,
"step": 38000
},
{
"epoch": 1.05,
"learning_rate": 2.371444574988388e-05,
"loss": 0.7572,
"step": 38500
},
{
"epoch": 1.07,
"learning_rate": 2.3372906363561846e-05,
"loss": 0.7549,
"step": 39000
},
{
"epoch": 1.08,
"learning_rate": 2.3031366977239816e-05,
"loss": 0.7552,
"step": 39500
},
{
"epoch": 1.09,
"learning_rate": 2.2689827590917785e-05,
"loss": 0.7524,
"step": 40000
},
{
"epoch": 1.11,
"learning_rate": 2.2348288204595755e-05,
"loss": 0.7505,
"step": 40500
},
{
"epoch": 1.12,
"learning_rate": 2.2006748818273724e-05,
"loss": 0.7513,
"step": 41000
},
{
"epoch": 1.13,
"learning_rate": 2.1665209431951694e-05,
"loss": 0.7476,
"step": 41500
},
{
"epoch": 1.15,
"learning_rate": 2.1323670045629663e-05,
"loss": 0.7489,
"step": 42000
},
{
"epoch": 1.16,
"learning_rate": 2.0982813738080276e-05,
"loss": 0.7483,
"step": 42500
},
{
"epoch": 1.17,
"learning_rate": 2.064127435175825e-05,
"loss": 0.7447,
"step": 43000
},
{
"epoch": 1.19,
"learning_rate": 2.0299734965436215e-05,
"loss": 0.7472,
"step": 43500
},
{
"epoch": 1.2,
"learning_rate": 1.9958195579114184e-05,
"loss": 0.7436,
"step": 44000
},
{
"epoch": 1.22,
"learning_rate": 1.9616656192792154e-05,
"loss": 0.7428,
"step": 44500
},
{
"epoch": 1.23,
"learning_rate": 1.9275116806470123e-05,
"loss": 0.7411,
"step": 45000
},
{
"epoch": 1.24,
"learning_rate": 1.8933577420148093e-05,
"loss": 0.7421,
"step": 45500
},
{
"epoch": 1.26,
"learning_rate": 1.8592038033826062e-05,
"loss": 0.7397,
"step": 46000
},
{
"epoch": 1.27,
"learning_rate": 1.825049864750403e-05,
"loss": 0.7396,
"step": 46500
},
{
"epoch": 1.28,
"learning_rate": 1.7908959261182e-05,
"loss": 0.7372,
"step": 47000
},
{
"epoch": 1.3,
"learning_rate": 1.756741987485997e-05,
"loss": 0.7367,
"step": 47500
},
{
"epoch": 1.31,
"learning_rate": 1.7225880488537937e-05,
"loss": 0.7381,
"step": 48000
},
{
"epoch": 1.33,
"learning_rate": 1.688434110221591e-05,
"loss": 0.7357,
"step": 48500
},
{
"epoch": 1.34,
"learning_rate": 1.6543484794666522e-05,
"loss": 0.7331,
"step": 49000
},
{
"epoch": 1.35,
"learning_rate": 1.6201945408344492e-05,
"loss": 0.7346,
"step": 49500
},
{
"epoch": 1.37,
"learning_rate": 1.586040602202246e-05,
"loss": 0.7328,
"step": 50000
},
{
"epoch": 1.38,
"learning_rate": 1.5519549714473074e-05,
"loss": 0.7319,
"step": 50500
},
{
"epoch": 1.39,
"learning_rate": 1.5178010328151043e-05,
"loss": 0.73,
"step": 51000
},
{
"epoch": 1.41,
"learning_rate": 1.4836470941829011e-05,
"loss": 0.7314,
"step": 51500
},
{
"epoch": 1.42,
"learning_rate": 1.4494931555506982e-05,
"loss": 0.7281,
"step": 52000
},
{
"epoch": 1.43,
"learning_rate": 1.415339216918495e-05,
"loss": 0.7282,
"step": 52500
},
{
"epoch": 1.45,
"learning_rate": 1.381185278286292e-05,
"loss": 0.7275,
"step": 53000
},
{
"epoch": 1.46,
"learning_rate": 1.3470996475313532e-05,
"loss": 0.7286,
"step": 53500
},
{
"epoch": 1.48,
"learning_rate": 1.3129457088991503e-05,
"loss": 0.727,
"step": 54000
},
{
"epoch": 1.49,
"learning_rate": 1.2787917702669471e-05,
"loss": 0.7263,
"step": 54500
},
{
"epoch": 1.5,
"learning_rate": 1.244637831634744e-05,
"loss": 0.7261,
"step": 55000
},
{
"epoch": 1.52,
"learning_rate": 1.2104838930025412e-05,
"loss": 0.7241,
"step": 55500
},
{
"epoch": 1.53,
"learning_rate": 1.1763299543703381e-05,
"loss": 0.7243,
"step": 56000
},
{
"epoch": 1.54,
"learning_rate": 1.1421760157381351e-05,
"loss": 0.7246,
"step": 56500
},
{
"epoch": 1.56,
"learning_rate": 1.1080220771059319e-05,
"loss": 0.7249,
"step": 57000
},
{
"epoch": 1.57,
"learning_rate": 1.0738681384737288e-05,
"loss": 0.7193,
"step": 57500
},
{
"epoch": 1.58,
"learning_rate": 1.0397141998415258e-05,
"loss": 0.7206,
"step": 58000
},
{
"epoch": 1.6,
"learning_rate": 1.0055602612093227e-05,
"loss": 0.72,
"step": 58500
},
{
"epoch": 1.61,
"learning_rate": 9.714746304543841e-06,
"loss": 0.7189,
"step": 59000
},
{
"epoch": 1.63,
"learning_rate": 9.37320691822181e-06,
"loss": 0.7193,
"step": 59500
},
{
"epoch": 1.64,
"learning_rate": 9.031667531899779e-06,
"loss": 0.7173,
"step": 60000
},
{
"epoch": 1.65,
"learning_rate": 8.690128145577748e-06,
"loss": 0.7181,
"step": 60500
},
{
"epoch": 1.67,
"learning_rate": 8.348588759255718e-06,
"loss": 0.7155,
"step": 61000
},
{
"epoch": 1.68,
"learning_rate": 8.007049372933686e-06,
"loss": 0.7183,
"step": 61500
},
{
"epoch": 1.69,
"learning_rate": 7.665509986611655e-06,
"loss": 0.7175,
"step": 62000
},
{
"epoch": 1.71,
"learning_rate": 7.323970600289626e-06,
"loss": 0.7157,
"step": 62500
},
{
"epoch": 1.72,
"learning_rate": 6.98311429274024e-06,
"loss": 0.7164,
"step": 63000
},
{
"epoch": 1.74,
"learning_rate": 6.642257985190853e-06,
"loss": 0.7147,
"step": 63500
},
{
"epoch": 1.75,
"learning_rate": 6.3007185988688224e-06,
"loss": 0.7143,
"step": 64000
},
{
"epoch": 1.76,
"learning_rate": 5.959179212546791e-06,
"loss": 0.713,
"step": 64500
},
{
"epoch": 1.78,
"learning_rate": 5.617639826224761e-06,
"loss": 0.7115,
"step": 65000
},
{
"epoch": 1.79,
"learning_rate": 5.27610043990273e-06,
"loss": 0.7106,
"step": 65500
},
{
"epoch": 1.8,
"learning_rate": 4.935244132353343e-06,
"loss": 0.7095,
"step": 66000
},
{
"epoch": 1.82,
"learning_rate": 4.593704746031312e-06,
"loss": 0.7119,
"step": 66500
},
{
"epoch": 1.83,
"learning_rate": 4.2521653597092825e-06,
"loss": 0.7099,
"step": 67000
},
{
"epoch": 1.84,
"learning_rate": 3.910625973387251e-06,
"loss": 0.7084,
"step": 67500
},
{
"epoch": 1.86,
"learning_rate": 3.5690865870652207e-06,
"loss": 0.71,
"step": 68000
},
{
"epoch": 1.87,
"learning_rate": 3.22754720074319e-06,
"loss": 0.7093,
"step": 68500
},
{
"epoch": 1.89,
"learning_rate": 2.886690893193803e-06,
"loss": 0.7086,
"step": 69000
},
{
"epoch": 1.9,
"learning_rate": 2.5451515068717725e-06,
"loss": 0.7096,
"step": 69500
},
{
"epoch": 1.91,
"learning_rate": 2.204295199322386e-06,
"loss": 0.7072,
"step": 70000
},
{
"epoch": 1.93,
"learning_rate": 1.8627558130003552e-06,
"loss": 0.707,
"step": 70500
},
{
"epoch": 1.94,
"learning_rate": 1.5212164266783245e-06,
"loss": 0.7087,
"step": 71000
},
{
"epoch": 1.95,
"learning_rate": 1.180360119128938e-06,
"loss": 0.707,
"step": 71500
},
{
"epoch": 1.97,
"learning_rate": 8.388207328069074e-07,
"loss": 0.7074,
"step": 72000
},
{
"epoch": 1.98,
"learning_rate": 4.972813464848767e-07,
"loss": 0.7069,
"step": 72500
},
{
"epoch": 1.99,
"learning_rate": 1.5574196016284598e-07,
"loss": 0.7074,
"step": 73000
},
{
"epoch": 2.0,
"step": 73198,
"total_flos": 9.8240404403142e+18,
"train_loss": 0.795374077400056,
"train_runtime": 74284.0357,
"train_samples_per_second": 141.892,
"train_steps_per_second": 0.985
}
],
"logging_steps": 500,
"max_steps": 73198,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 9.8240404403142e+18,
"trial_name": null,
"trial_params": null
}
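
The trainer state above can be inspected programmatically. The following is a minimal sketch, assuming the JSON is saved locally as trainer_state.json and that matplotlib is installed; it reads log_history and plots the logged training loss and learning rate against the step count. The file path and plotting choices are illustrative, not part of the original upload.

# Minimal sketch: load trainer_state.json and plot the logged training loss
# and learning-rate schedule. Assumes the file is in the current directory
# and matplotlib is available.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step log entries; the final summary entry has no "loss" key.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()

As logged, the loss falls from about 1.35 at step 500 to about 0.71 by step 73000, and the learning rate decays from roughly 5e-5 toward zero over the 73198 steps, consistent with a decaying schedule over two epochs.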