Yelp_French / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"global_step": 35574,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"learning_rate": 4.9297239556979816e-05,
"loss": 3.4444,
"step": 500
},
{
"epoch": 0.17,
"learning_rate": 4.8594479113959636e-05,
"loss": 3.339,
"step": 1000
},
{
"epoch": 0.25,
"learning_rate": 4.789171867093945e-05,
"loss": 3.3261,
"step": 1500
},
{
"epoch": 0.34,
"learning_rate": 4.718895822791927e-05,
"loss": 3.2812,
"step": 2000
},
{
"epoch": 0.42,
"learning_rate": 4.648619778489909e-05,
"loss": 3.25,
"step": 2500
},
{
"epoch": 0.51,
"learning_rate": 4.5783437341878904e-05,
"loss": 3.2331,
"step": 3000
},
{
"epoch": 0.59,
"learning_rate": 4.508067689885872e-05,
"loss": 3.2195,
"step": 3500
},
{
"epoch": 0.67,
"learning_rate": 4.437791645583854e-05,
"loss": 3.2063,
"step": 4000
},
{
"epoch": 0.76,
"learning_rate": 4.367515601281835e-05,
"loss": 3.2028,
"step": 4500
},
{
"epoch": 0.84,
"learning_rate": 4.297239556979817e-05,
"loss": 3.1897,
"step": 5000
},
{
"epoch": 0.93,
"learning_rate": 4.2269635126777984e-05,
"loss": 3.1792,
"step": 5500
},
{
"epoch": 1.01,
"learning_rate": 4.15668746837578e-05,
"loss": 3.1564,
"step": 6000
},
{
"epoch": 1.1,
"learning_rate": 4.0864114240737625e-05,
"loss": 3.0599,
"step": 6500
},
{
"epoch": 1.18,
"learning_rate": 4.016135379771744e-05,
"loss": 3.0641,
"step": 7000
},
{
"epoch": 1.26,
"learning_rate": 3.945859335469725e-05,
"loss": 3.0661,
"step": 7500
},
{
"epoch": 1.35,
"learning_rate": 3.875583291167707e-05,
"loss": 3.0731,
"step": 8000
},
{
"epoch": 1.43,
"learning_rate": 3.8053072468656886e-05,
"loss": 3.0555,
"step": 8500
},
{
"epoch": 1.52,
"learning_rate": 3.73503120256367e-05,
"loss": 3.0592,
"step": 9000
},
{
"epoch": 1.6,
"learning_rate": 3.664755158261652e-05,
"loss": 3.0618,
"step": 9500
},
{
"epoch": 1.69,
"learning_rate": 3.594479113959633e-05,
"loss": 3.0592,
"step": 10000
},
{
"epoch": 1.77,
"learning_rate": 3.524203069657615e-05,
"loss": 3.0496,
"step": 10500
},
{
"epoch": 1.86,
"learning_rate": 3.453927025355597e-05,
"loss": 3.0396,
"step": 11000
},
{
"epoch": 1.94,
"learning_rate": 3.383650981053579e-05,
"loss": 3.0479,
"step": 11500
},
{
"epoch": 2.02,
"learning_rate": 3.313374936751561e-05,
"loss": 3.0178,
"step": 12000
},
{
"epoch": 2.11,
"learning_rate": 3.243098892449542e-05,
"loss": 2.9502,
"step": 12500
},
{
"epoch": 2.19,
"learning_rate": 3.1728228481475234e-05,
"loss": 2.9585,
"step": 13000
},
{
"epoch": 2.28,
"learning_rate": 3.1025468038455054e-05,
"loss": 2.968,
"step": 13500
},
{
"epoch": 2.36,
"learning_rate": 3.0322707595434867e-05,
"loss": 2.9591,
"step": 14000
},
{
"epoch": 2.45,
"learning_rate": 2.9619947152414684e-05,
"loss": 2.9585,
"step": 14500
},
{
"epoch": 2.53,
"learning_rate": 2.89171867093945e-05,
"loss": 2.9628,
"step": 15000
},
{
"epoch": 2.61,
"learning_rate": 2.821442626637432e-05,
"loss": 2.9712,
"step": 15500
},
{
"epoch": 2.7,
"learning_rate": 2.7511665823354138e-05,
"loss": 2.9583,
"step": 16000
},
{
"epoch": 2.78,
"learning_rate": 2.6808905380333955e-05,
"loss": 2.9491,
"step": 16500
},
{
"epoch": 2.87,
"learning_rate": 2.610614493731377e-05,
"loss": 2.9594,
"step": 17000
},
{
"epoch": 2.95,
"learning_rate": 2.5403384494293585e-05,
"loss": 2.9673,
"step": 17500
},
{
"epoch": 3.04,
"learning_rate": 2.4700624051273402e-05,
"loss": 2.9344,
"step": 18000
},
{
"epoch": 3.12,
"learning_rate": 2.3997863608253223e-05,
"loss": 2.8771,
"step": 18500
},
{
"epoch": 3.2,
"learning_rate": 2.3295103165233036e-05,
"loss": 2.8863,
"step": 19000
},
{
"epoch": 3.29,
"learning_rate": 2.2592342722212853e-05,
"loss": 2.8845,
"step": 19500
},
{
"epoch": 3.37,
"learning_rate": 2.188958227919267e-05,
"loss": 2.9015,
"step": 20000
},
{
"epoch": 3.46,
"learning_rate": 2.1186821836172487e-05,
"loss": 2.8927,
"step": 20500
},
{
"epoch": 3.54,
"learning_rate": 2.0484061393152303e-05,
"loss": 2.889,
"step": 21000
},
{
"epoch": 3.63,
"learning_rate": 1.978130095013212e-05,
"loss": 2.9037,
"step": 21500
},
{
"epoch": 3.71,
"learning_rate": 1.9078540507111937e-05,
"loss": 2.8995,
"step": 22000
},
{
"epoch": 3.79,
"learning_rate": 1.837578006409175e-05,
"loss": 2.8922,
"step": 22500
},
{
"epoch": 3.88,
"learning_rate": 1.767301962107157e-05,
"loss": 2.8954,
"step": 23000
},
{
"epoch": 3.96,
"learning_rate": 1.6970259178051388e-05,
"loss": 2.9017,
"step": 23500
},
{
"epoch": 4.05,
"learning_rate": 1.6267498735031205e-05,
"loss": 2.8701,
"step": 24000
},
{
"epoch": 4.13,
"learning_rate": 1.5564738292011018e-05,
"loss": 2.8549,
"step": 24500
},
{
"epoch": 4.22,
"learning_rate": 1.4861977848990838e-05,
"loss": 2.8502,
"step": 25000
},
{
"epoch": 4.3,
"learning_rate": 1.4159217405970653e-05,
"loss": 2.846,
"step": 25500
},
{
"epoch": 4.39,
"learning_rate": 1.345645696295047e-05,
"loss": 2.8497,
"step": 26000
},
{
"epoch": 4.47,
"learning_rate": 1.2753696519930285e-05,
"loss": 2.8284,
"step": 26500
},
{
"epoch": 4.55,
"learning_rate": 1.2050936076910104e-05,
"loss": 2.8432,
"step": 27000
},
{
"epoch": 4.64,
"learning_rate": 1.1348175633889919e-05,
"loss": 2.8468,
"step": 27500
},
{
"epoch": 4.72,
"learning_rate": 1.0645415190869738e-05,
"loss": 2.8425,
"step": 28000
},
{
"epoch": 4.81,
"learning_rate": 9.942654747849553e-06,
"loss": 2.8403,
"step": 28500
},
{
"epoch": 4.89,
"learning_rate": 9.23989430482937e-06,
"loss": 2.8276,
"step": 29000
},
{
"epoch": 4.98,
"learning_rate": 8.537133861809187e-06,
"loss": 2.8549,
"step": 29500
},
{
"epoch": 5.06,
"learning_rate": 7.834373418789003e-06,
"loss": 2.8061,
"step": 30000
},
{
"epoch": 5.14,
"learning_rate": 7.13161297576882e-06,
"loss": 2.8111,
"step": 30500
},
{
"epoch": 5.23,
"learning_rate": 6.428852532748637e-06,
"loss": 2.8153,
"step": 31000
},
{
"epoch": 5.31,
"learning_rate": 5.726092089728453e-06,
"loss": 2.8129,
"step": 31500
},
{
"epoch": 5.4,
"learning_rate": 5.02333164670827e-06,
"loss": 2.812,
"step": 32000
},
{
"epoch": 5.48,
"learning_rate": 4.320571203688087e-06,
"loss": 2.8134,
"step": 32500
},
{
"epoch": 5.57,
"learning_rate": 3.6178107606679037e-06,
"loss": 2.8172,
"step": 33000
},
{
"epoch": 5.65,
"learning_rate": 2.9150503176477205e-06,
"loss": 2.809,
"step": 33500
},
{
"epoch": 5.73,
"learning_rate": 2.212289874627537e-06,
"loss": 2.8115,
"step": 34000
},
{
"epoch": 5.82,
"learning_rate": 1.5095294316073538e-06,
"loss": 2.81,
"step": 34500
},
{
"epoch": 5.9,
"learning_rate": 8.067689885871704e-07,
"loss": 2.8087,
"step": 35000
},
{
"epoch": 5.99,
"learning_rate": 1.0400854556698713e-07,
"loss": 2.799,
"step": 35500
},
{
"epoch": 6.0,
"step": 35574,
"total_flos": 1.8590405492736e+16,
"train_loss": 2.969814732685026,
"train_runtime": 9127.501,
"train_samples_per_second": 3.897,
"train_steps_per_second": 3.897
}
],
"max_steps": 35574,
"num_train_epochs": 6,
"total_flos": 1.8590405492736e+16,
"trial_name": null,
"trial_params": null
}
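
The log_history above records the training loss and the linearly decaying learning rate every 500 optimizer steps, plus a final summary record (train_runtime, train_loss, throughput). A minimal sketch of how this file could be inspected, assuming the JSON above is saved locally as trainer_state.json and that matplotlib is available (both are assumptions, not part of the original upload):

    import json
    import matplotlib.pyplot as plt

    # Load the trainer state written by the Hugging Face Trainer.
    with open("trainer_state.json", "r", encoding="utf-8") as f:
        state = json.load(f)

    # Keep only the periodic logging entries; the last entry is a summary
    # record with train_runtime/train_loss instead of a per-step loss.
    logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]

    steps = [e["step"] for e in logs]
    losses = [e["loss"] for e in logs]
    lrs = [e["learning_rate"] for e in logs]

    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
    ax1.plot(steps, losses)
    ax1.set_ylabel("training loss")
    ax2.plot(steps, lrs)
    ax2.set_ylabel("learning rate")
    ax2.set_xlabel("global step")
    fig.suptitle("Yelp_French fine-tuning: loss and linear LR decay over 6 epochs")
    plt.tight_layout()
    plt.show()

Plotted this way, the data show the loss dropping from about 3.44 at step 500 to about 2.80 by step 35500 while the learning rate decays linearly from roughly 4.93e-05 toward zero at max_steps = 35574.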