{
"best_metric": 1.6777832508087158,
"best_model_checkpoint": "/data/user_data/gonilude/cpp_and_text_gpt2/checkpoint-200",
"epoch": 3.0,
"eval_steps": 50,
"global_step": 237,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.2,
"eval_loss": 4.986144065856934,
"eval_runtime": 1.1285,
"eval_samples_per_second": 62.027,
"eval_steps_per_second": 7.975,
"num_input_tokens_seen": 0,
"step": 0
},
{
"epoch": 0.012658227848101266,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 6.4794,
"num_input_tokens_seen": 8192,
"step": 1
},
{
"epoch": 0.06329113924050633,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 6.133,
"num_input_tokens_seen": 40960,
"step": 5
},
{
"epoch": 0.12658227848101267,
"grad_norm": 1133.67919921875,
"learning_rate": 5e-06,
"loss": 5.5324,
"num_input_tokens_seen": 81920,
"step": 10
},
{
"epoch": 0.189873417721519,
"grad_norm": 198.05026245117188,
"learning_rate": 1.7500000000000002e-05,
"loss": 6.6948,
"num_input_tokens_seen": 122880,
"step": 15
},
{
"epoch": 0.25316455696202533,
"grad_norm": 115.6029052734375,
"learning_rate": 1.9984947460216708e-05,
"loss": 4.4825,
"num_input_tokens_seen": 163840,
"step": 20
},
{
"epoch": 0.31645569620253167,
"grad_norm": 88.50775909423828,
"learning_rate": 1.9923874174311394e-05,
"loss": 4.1532,
"num_input_tokens_seen": 204800,
"step": 25
},
{
"epoch": 0.379746835443038,
"grad_norm": 107.87947082519531,
"learning_rate": 1.9816126380411478e-05,
"loss": 4.0467,
"num_input_tokens_seen": 245760,
"step": 30
},
{
"epoch": 0.4430379746835443,
"grad_norm": 23.852434158325195,
"learning_rate": 1.9662210843836574e-05,
"loss": 1.649,
"num_input_tokens_seen": 286720,
"step": 35
},
{
"epoch": 0.5063291139240507,
"grad_norm": 12.68623161315918,
"learning_rate": 1.946285146855968e-05,
"loss": 1.8697,
"num_input_tokens_seen": 327680,
"step": 40
},
{
"epoch": 0.569620253164557,
"grad_norm": 13.480600357055664,
"learning_rate": 1.921898589250242e-05,
"loss": 1.8013,
"num_input_tokens_seen": 368640,
"step": 45
},
{
"epoch": 0.6329113924050633,
"grad_norm": 18.860410690307617,
"learning_rate": 1.8931761077585037e-05,
"loss": 1.8815,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 0.6329113924050633,
"eval_accuracy": 0.18571428571428572,
"eval_loss": 1.7072056531906128,
"eval_runtime": 0.9042,
"eval_samples_per_second": 77.413,
"eval_steps_per_second": 9.953,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 0.6962025316455697,
"grad_norm": 38.59328842163086,
"learning_rate": 1.860252791527236e-05,
"loss": 1.7259,
"num_input_tokens_seen": 450560,
"step": 55
},
{
"epoch": 0.759493670886076,
"grad_norm": 12.516915321350098,
"learning_rate": 1.8232834872987147e-05,
"loss": 1.8872,
"num_input_tokens_seen": 491520,
"step": 60
},
{
"epoch": 0.8227848101265823,
"grad_norm": 30.57850456237793,
"learning_rate": 1.782442071127338e-05,
"loss": 1.8078,
"num_input_tokens_seen": 532480,
"step": 65
},
{
"epoch": 0.8860759493670886,
"grad_norm": 15.95567512512207,
"learning_rate": 1.7379206305962525e-05,
"loss": 1.7026,
"num_input_tokens_seen": 573440,
"step": 70
},
{
"epoch": 0.9493670886075949,
"grad_norm": 16.61585235595703,
"learning_rate": 1.6899285613805246e-05,
"loss": 1.6721,
"num_input_tokens_seen": 614400,
"step": 75
},
{
"epoch": 1.0126582278481013,
"grad_norm": 17.780010223388672,
"learning_rate": 1.6386915824059427e-05,
"loss": 1.4982,
"num_input_tokens_seen": 655360,
"step": 80
},
{
"epoch": 1.0759493670886076,
"grad_norm": 15.395638465881348,
"learning_rate": 1.5844506742354163e-05,
"loss": 1.7322,
"num_input_tokens_seen": 696320,
"step": 85
},
{
"epoch": 1.139240506329114,
"grad_norm": 18.798755645751953,
"learning_rate": 1.5274609456760073e-05,
"loss": 1.6946,
"num_input_tokens_seen": 737280,
"step": 90
},
{
"epoch": 1.2025316455696202,
"grad_norm": 15.229305267333984,
"learning_rate": 1.4679904339372301e-05,
"loss": 1.6712,
"num_input_tokens_seen": 778240,
"step": 95
},
{
"epoch": 1.2658227848101267,
"grad_norm": 14.479815483093262,
"learning_rate": 1.4063188439837831e-05,
"loss": 1.6129,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 1.2658227848101267,
"eval_accuracy": 0.15714285714285714,
"eval_loss": 1.7499092817306519,
"eval_runtime": 0.9052,
"eval_samples_per_second": 77.327,
"eval_steps_per_second": 9.942,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 1.3291139240506329,
"grad_norm": 15.65237045288086,
"learning_rate": 1.3427362330118542e-05,
"loss": 1.6163,
"num_input_tokens_seen": 860160,
"step": 105
},
{
"epoch": 1.3924050632911391,
"grad_norm": 12.068513870239258,
"learning_rate": 1.2775416462362458e-05,
"loss": 1.6507,
"num_input_tokens_seen": 901120,
"step": 110
},
{
"epoch": 1.4556962025316456,
"grad_norm": 18.504961013793945,
"learning_rate": 1.2110417104045575e-05,
"loss": 1.6317,
"num_input_tokens_seen": 942080,
"step": 115
},
{
"epoch": 1.518987341772152,
"grad_norm": 13.793102264404297,
"learning_rate": 1.1435491916534919e-05,
"loss": 1.7895,
"num_input_tokens_seen": 983040,
"step": 120
},
{
"epoch": 1.5822784810126582,
"grad_norm": 22.34800148010254,
"learning_rate": 1.0753815244900459e-05,
"loss": 1.6955,
"num_input_tokens_seen": 1024000,
"step": 125
},
{
"epoch": 1.6455696202531644,
"grad_norm": 19.815095901489258,
"learning_rate": 1.0068593188161698e-05,
"loss": 1.686,
"num_input_tokens_seen": 1064960,
"step": 130
},
{
"epoch": 1.7088607594936709,
"grad_norm": 22.263498306274414,
"learning_rate": 9.383048520187344e-06,
"loss": 1.6595,
"num_input_tokens_seen": 1105920,
"step": 135
},
{
"epoch": 1.7721518987341773,
"grad_norm": 19.161380767822266,
"learning_rate": 8.700405532168921e-06,
"loss": 1.5961,
"num_input_tokens_seen": 1146880,
"step": 140
},
{
"epoch": 1.8354430379746836,
"grad_norm": 27.031253814697266,
"learning_rate": 8.023874867958027e-06,
"loss": 1.7879,
"num_input_tokens_seen": 1187840,
"step": 145
},
{
"epoch": 1.8987341772151898,
"grad_norm": 18.79669952392578,
"learning_rate": 7.3566384235904855e-06,
"loss": 1.6363,
"num_input_tokens_seen": 1228800,
"step": 150
},
{
"epoch": 1.8987341772151898,
"eval_accuracy": 0.15714285714285714,
"eval_loss": 1.7443429231643677,
"eval_runtime": 0.9046,
"eval_samples_per_second": 77.387,
"eval_steps_per_second": 9.95,
"num_input_tokens_seen": 1228800,
"step": 150
},
{
"epoch": 1.9620253164556962,
"grad_norm": 15.857290267944336,
"learning_rate": 6.7018343820188324e-06,
"loss": 1.7356,
"num_input_tokens_seen": 1269760,
"step": 155
},
{
"epoch": 2.0253164556962027,
"grad_norm": 14.174161911010742,
"learning_rate": 6.0625424534385425e-06,
"loss": 1.6213,
"num_input_tokens_seen": 1310720,
"step": 160
},
{
"epoch": 2.088607594936709,
"grad_norm": 19.495052337646484,
"learning_rate": 5.441769390626537e-06,
"loss": 1.6586,
"num_input_tokens_seen": 1351680,
"step": 165
},
{
"epoch": 2.151898734177215,
"grad_norm": 9.598532676696777,
"learning_rate": 4.842434847417001e-06,
"loss": 1.4322,
"num_input_tokens_seen": 1392640,
"step": 170
},
{
"epoch": 2.2151898734177213,
"grad_norm": 17.1945858001709,
"learning_rate": 4.267357646825746e-06,
"loss": 1.5919,
"num_input_tokens_seen": 1433600,
"step": 175
},
{
"epoch": 2.278481012658228,
"grad_norm": 17.863174438476562,
"learning_rate": 3.719242523407539e-06,
"loss": 1.5252,
"num_input_tokens_seen": 1474560,
"step": 180
},
{
"epoch": 2.3417721518987342,
"grad_norm": 8.82008171081543,
"learning_rate": 3.2006674022005857e-06,
"loss": 1.6203,
"num_input_tokens_seen": 1515520,
"step": 185
},
{
"epoch": 2.4050632911392404,
"grad_norm": 10.616914749145508,
"learning_rate": 2.714071274088438e-06,
"loss": 1.5487,
"num_input_tokens_seen": 1556480,
"step": 190
},
{
"epoch": 2.4683544303797467,
"grad_norm": 10.994531631469727,
"learning_rate": 2.2617427246045976e-06,
"loss": 1.6295,
"num_input_tokens_seen": 1597440,
"step": 195
},
{
"epoch": 2.5316455696202533,
"grad_norm": 18.025531768798828,
"learning_rate": 1.8458091701318504e-06,
"loss": 1.5486,
"num_input_tokens_seen": 1638400,
"step": 200
},
{
"epoch": 2.5316455696202533,
"eval_accuracy": 0.15714285714285714,
"eval_loss": 1.6777832508087158,
"eval_runtime": 0.909,
"eval_samples_per_second": 77.011,
"eval_steps_per_second": 9.901,
"num_input_tokens_seen": 1638400,
"step": 200
},
{
"epoch": 2.5949367088607596,
"grad_norm": 11.53801155090332,
"learning_rate": 1.4682268521211075e-06,
"loss": 1.5443,
"num_input_tokens_seen": 1679360,
"step": 205
},
{
"epoch": 2.6582278481012658,
"grad_norm": 9.935652732849121,
"learning_rate": 1.130771636389596e-06,
"loss": 1.5573,
"num_input_tokens_seen": 1720320,
"step": 210
},
{
"epoch": 2.721518987341772,
"grad_norm": 11.843810081481934,
"learning_rate": 8.350306607715774e-07,
"loss": 1.5242,
"num_input_tokens_seen": 1761280,
"step": 215
},
{
"epoch": 2.7848101265822782,
"grad_norm": 14.085565567016602,
"learning_rate": 5.823948704048443e-07,
"loss": 1.5748,
"num_input_tokens_seen": 1802240,
"step": 220
},
{
"epoch": 2.848101265822785,
"grad_norm": 12.15102767944336,
"learning_rate": 3.7405247576144055e-07,
"loss": 1.5469,
"num_input_tokens_seen": 1843200,
"step": 225
},
{
"epoch": 2.911392405063291,
"grad_norm": 15.505411148071289,
"learning_rate": 2.1098336419116628e-07,
"loss": 1.5503,
"num_input_tokens_seen": 1884160,
"step": 230
},
{
"epoch": 2.9746835443037973,
"grad_norm": 11.011249542236328,
"learning_rate": 9.395449126177291e-08,
"loss": 1.4825,
"num_input_tokens_seen": 1925120,
"step": 235
},
{
"epoch": 3.0,
"num_input_tokens_seen": 1941504,
"step": 237,
"total_flos": 3521692676653056.0,
"train_loss": 2.0961106618245444,
"train_runtime": 196.5081,
"train_samples_per_second": 9.587,
"train_steps_per_second": 1.206
}
],
"logging_steps": 5,
"max_steps": 237,
"num_input_tokens_seen": 1941504,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3521692676653056.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}