t5_small_race_mutlirc / trainer_state.json
End of training · 8d375ef
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"global_step": 42423,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"learning_rate": 5e-05,
"loss": 1.3894,
"step": 500
},
{
"epoch": 0.07,
"learning_rate": 0.0001,
"loss": 0.7218,
"step": 1000
},
{
"epoch": 0.11,
"learning_rate": 9.879294111966783e-05,
"loss": 0.7089,
"step": 1500
},
{
"epoch": 0.14,
"learning_rate": 9.758588223933563e-05,
"loss": 0.6974,
"step": 2000
},
{
"epoch": 0.18,
"learning_rate": 9.637882335900346e-05,
"loss": 0.68,
"step": 2500
},
{
"epoch": 0.21,
"learning_rate": 9.517176447867127e-05,
"loss": 0.6789,
"step": 3000
},
{
"epoch": 0.25,
"learning_rate": 9.396470559833909e-05,
"loss": 0.6668,
"step": 3500
},
{
"epoch": 0.28,
"learning_rate": 9.275764671800691e-05,
"loss": 0.6663,
"step": 4000
},
{
"epoch": 0.32,
"learning_rate": 9.155058783767473e-05,
"loss": 0.6634,
"step": 4500
},
{
"epoch": 0.35,
"learning_rate": 9.034352895734254e-05,
"loss": 0.6529,
"step": 5000
},
{
"epoch": 0.39,
"learning_rate": 8.913647007701035e-05,
"loss": 0.6555,
"step": 5500
},
{
"epoch": 0.42,
"learning_rate": 8.792941119667818e-05,
"loss": 0.6464,
"step": 6000
},
{
"epoch": 0.46,
"learning_rate": 8.6722352316346e-05,
"loss": 0.6397,
"step": 6500
},
{
"epoch": 0.5,
"learning_rate": 8.55152934360138e-05,
"loss": 0.6439,
"step": 7000
},
{
"epoch": 0.53,
"learning_rate": 8.430823455568163e-05,
"loss": 0.6393,
"step": 7500
},
{
"epoch": 0.57,
"learning_rate": 8.310117567534945e-05,
"loss": 0.6352,
"step": 8000
},
{
"epoch": 0.6,
"learning_rate": 8.189411679501726e-05,
"loss": 0.6304,
"step": 8500
},
{
"epoch": 0.64,
"learning_rate": 8.068705791468509e-05,
"loss": 0.6344,
"step": 9000
},
{
"epoch": 0.67,
"learning_rate": 7.94799990343529e-05,
"loss": 0.6336,
"step": 9500
},
{
"epoch": 0.71,
"learning_rate": 7.827294015402071e-05,
"loss": 0.623,
"step": 10000
},
{
"epoch": 0.74,
"learning_rate": 7.706588127368853e-05,
"loss": 0.6186,
"step": 10500
},
{
"epoch": 0.78,
"learning_rate": 7.585882239335635e-05,
"loss": 0.6288,
"step": 11000
},
{
"epoch": 0.81,
"learning_rate": 7.465176351302417e-05,
"loss": 0.6156,
"step": 11500
},
{
"epoch": 0.85,
"learning_rate": 7.344470463269198e-05,
"loss": 0.6113,
"step": 12000
},
{
"epoch": 0.88,
"learning_rate": 7.223764575235981e-05,
"loss": 0.6182,
"step": 12500
},
{
"epoch": 0.92,
"learning_rate": 7.103058687202762e-05,
"loss": 0.6233,
"step": 13000
},
{
"epoch": 0.95,
"learning_rate": 6.982352799169543e-05,
"loss": 0.6042,
"step": 13500
},
{
"epoch": 0.99,
"learning_rate": 6.861646911136326e-05,
"loss": 0.6043,
"step": 14000
},
{
"epoch": 1.0,
"eval_accuracy": 0.4832,
"eval_loss": 0.5925313234329224,
"eval_runtime": 306.7871,
"eval_samples_per_second": 25.659,
"eval_steps_per_second": 1.604,
"step": 14141
},
{
"epoch": 1.03,
"learning_rate": 6.740941023103108e-05,
"loss": 0.5816,
"step": 14500
},
{
"epoch": 1.06,
"learning_rate": 6.620235135069889e-05,
"loss": 0.5907,
"step": 15000
},
{
"epoch": 1.1,
"learning_rate": 6.499529247036672e-05,
"loss": 0.5814,
"step": 15500
},
{
"epoch": 1.13,
"learning_rate": 6.378823359003453e-05,
"loss": 0.5949,
"step": 16000
},
{
"epoch": 1.17,
"learning_rate": 6.258117470970234e-05,
"loss": 0.5838,
"step": 16500
},
{
"epoch": 1.2,
"learning_rate": 6.137411582937016e-05,
"loss": 0.5823,
"step": 17000
},
{
"epoch": 1.24,
"learning_rate": 6.016705694903798e-05,
"loss": 0.5872,
"step": 17500
},
{
"epoch": 1.27,
"learning_rate": 5.895999806870579e-05,
"loss": 0.5779,
"step": 18000
},
{
"epoch": 1.31,
"learning_rate": 5.775293918837361e-05,
"loss": 0.5744,
"step": 18500
},
{
"epoch": 1.34,
"learning_rate": 5.654588030804143e-05,
"loss": 0.5833,
"step": 19000
},
{
"epoch": 1.38,
"learning_rate": 5.533882142770924e-05,
"loss": 0.5855,
"step": 19500
},
{
"epoch": 1.41,
"learning_rate": 5.413176254737706e-05,
"loss": 0.5836,
"step": 20000
},
{
"epoch": 1.45,
"learning_rate": 5.292470366704488e-05,
"loss": 0.5793,
"step": 20500
},
{
"epoch": 1.49,
"learning_rate": 5.17176447867127e-05,
"loss": 0.57,
"step": 21000
},
{
"epoch": 1.52,
"learning_rate": 5.051058590638052e-05,
"loss": 0.5792,
"step": 21500
},
{
"epoch": 1.56,
"learning_rate": 4.930352702604834e-05,
"loss": 0.5577,
"step": 22000
},
{
"epoch": 1.59,
"learning_rate": 4.809646814571615e-05,
"loss": 0.5729,
"step": 22500
},
{
"epoch": 1.63,
"learning_rate": 4.6889409265383964e-05,
"loss": 0.5618,
"step": 23000
},
{
"epoch": 1.66,
"learning_rate": 4.5682350385051784e-05,
"loss": 0.5678,
"step": 23500
},
{
"epoch": 1.7,
"learning_rate": 4.4475291504719604e-05,
"loss": 0.5723,
"step": 24000
},
{
"epoch": 1.73,
"learning_rate": 4.326823262438742e-05,
"loss": 0.5616,
"step": 24500
},
{
"epoch": 1.77,
"learning_rate": 4.206117374405524e-05,
"loss": 0.5707,
"step": 25000
},
{
"epoch": 1.8,
"learning_rate": 4.085411486372305e-05,
"loss": 0.5626,
"step": 25500
},
{
"epoch": 1.84,
"learning_rate": 3.964705598339087e-05,
"loss": 0.5703,
"step": 26000
},
{
"epoch": 1.87,
"learning_rate": 3.843999710305869e-05,
"loss": 0.5683,
"step": 26500
},
{
"epoch": 1.91,
"learning_rate": 3.7232938222726505e-05,
"loss": 0.555,
"step": 27000
},
{
"epoch": 1.94,
"learning_rate": 3.6025879342394325e-05,
"loss": 0.5664,
"step": 27500
},
{
"epoch": 1.98,
"learning_rate": 3.481882046206214e-05,
"loss": 0.5647,
"step": 28000
},
{
"epoch": 2.0,
"eval_accuracy": 0.5152,
"eval_loss": 0.5659283995628357,
"eval_runtime": 306.5061,
"eval_samples_per_second": 25.683,
"eval_steps_per_second": 1.605,
"step": 28282
},
{
"epoch": 2.02,
"learning_rate": 3.361176158172996e-05,
"loss": 0.5582,
"step": 28500
},
{
"epoch": 2.05,
"learning_rate": 3.240470270139778e-05,
"loss": 0.5302,
"step": 29000
},
{
"epoch": 2.09,
"learning_rate": 3.119764382106559e-05,
"loss": 0.545,
"step": 29500
},
{
"epoch": 2.12,
"learning_rate": 2.9990584940733412e-05,
"loss": 0.5382,
"step": 30000
},
{
"epoch": 2.16,
"learning_rate": 2.878352606040123e-05,
"loss": 0.5406,
"step": 30500
},
{
"epoch": 2.19,
"learning_rate": 2.7576467180069043e-05,
"loss": 0.5418,
"step": 31000
},
{
"epoch": 2.23,
"learning_rate": 2.6369408299736863e-05,
"loss": 0.5378,
"step": 31500
},
{
"epoch": 2.26,
"learning_rate": 2.516234941940468e-05,
"loss": 0.5412,
"step": 32000
},
{
"epoch": 2.3,
"learning_rate": 2.3955290539072496e-05,
"loss": 0.538,
"step": 32500
},
{
"epoch": 2.33,
"learning_rate": 2.2748231658740317e-05,
"loss": 0.5227,
"step": 33000
},
{
"epoch": 2.37,
"learning_rate": 2.154117277840813e-05,
"loss": 0.5314,
"step": 33500
},
{
"epoch": 2.4,
"learning_rate": 2.033411389807595e-05,
"loss": 0.5342,
"step": 34000
},
{
"epoch": 2.44,
"learning_rate": 1.9127055017743767e-05,
"loss": 0.5381,
"step": 34500
},
{
"epoch": 2.48,
"learning_rate": 1.7919996137411584e-05,
"loss": 0.5349,
"step": 35000
},
{
"epoch": 2.51,
"learning_rate": 1.67129372570794e-05,
"loss": 0.5349,
"step": 35500
},
{
"epoch": 2.55,
"learning_rate": 1.5505878376747217e-05,
"loss": 0.5409,
"step": 36000
},
{
"epoch": 2.58,
"learning_rate": 1.4298819496415036e-05,
"loss": 0.5149,
"step": 36500
},
{
"epoch": 2.62,
"learning_rate": 1.3091760616082854e-05,
"loss": 0.5289,
"step": 37000
},
{
"epoch": 2.65,
"learning_rate": 1.1884701735750671e-05,
"loss": 0.5207,
"step": 37500
},
{
"epoch": 2.69,
"learning_rate": 1.0677642855418488e-05,
"loss": 0.5263,
"step": 38000
},
{
"epoch": 2.72,
"learning_rate": 9.470583975086306e-06,
"loss": 0.5246,
"step": 38500
},
{
"epoch": 2.76,
"learning_rate": 8.263525094754123e-06,
"loss": 0.5242,
"step": 39000
},
{
"epoch": 2.79,
"learning_rate": 7.056466214421941e-06,
"loss": 0.5349,
"step": 39500
},
{
"epoch": 2.83,
"learning_rate": 5.849407334089757e-06,
"loss": 0.5321,
"step": 40000
},
{
"epoch": 2.86,
"learning_rate": 4.642348453757574e-06,
"loss": 0.5215,
"step": 40500
},
{
"epoch": 2.9,
"learning_rate": 3.435289573425392e-06,
"loss": 0.5194,
"step": 41000
},
{
"epoch": 2.93,
"learning_rate": 2.228230693093209e-06,
"loss": 0.5224,
"step": 41500
},
{
"epoch": 2.97,
"learning_rate": 1.0211718127610264e-06,
"loss": 0.5237,
"step": 42000
},
{
"epoch": 3.0,
"eval_accuracy": 0.5259,
"eval_loss": 0.5759526491165161,
"eval_runtime": 306.403,
"eval_samples_per_second": 25.692,
"eval_steps_per_second": 1.606,
"step": 42423
},
{
"epoch": 3.0,
"step": 42423,
"total_flos": 4.593284195077325e+16,
"train_loss": 0.05477445060331273,
"train_runtime": 3408.5838,
"train_samples_per_second": 99.567,
"train_steps_per_second": 12.446
}
],
"max_steps": 42423,
"num_train_epochs": 3,
"total_flos": 4.593284195077325e+16,
"trial_name": null,
"trial_params": null
}
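
A minimal sketch (not part of the original file) of how this trainer state could be inspected: the "log_history" list mixes per-step training entries (keyed by "loss") with per-epoch evaluation entries (keyed by "eval_loss"/"eval_accuracy"), so splitting on those keys recovers the loss curve and the epoch-level metrics. The local path "trainer_state.json" is an assumption about where the file has been saved.

import json

# Assumption: the JSON above has been saved locally as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training logs (every 500 steps in this run) vs. per-epoch eval logs.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_logs)} training log entries, {len(eval_logs)} eval entries")
for e in eval_logs:
    print(f"epoch {e['epoch']}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_accuracy={e['eval_accuracy']:.4f}")

Run against this file, the loop would print the three epoch-end rows (eval_loss 0.5925, 0.5659, 0.5760; eval_accuracy 0.4832, 0.5152, 0.5259), showing that eval loss bottoms out at epoch 2 while accuracy still improves slightly at epoch 3.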