checkpoint-237/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.989473684210527,
"eval_steps": 500,
"global_step": 237,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08421052631578947,
"grad_norm": 64.44918823242188,
"learning_rate": 1.6666666666666667e-05,
"loss": 5.9893,
"step": 4
},
{
"epoch": 0.16842105263157894,
"grad_norm": 1.1460206508636475,
"learning_rate": 3.888888888888889e-05,
"loss": 1.7662,
"step": 8
},
{
"epoch": 0.25263157894736843,
"grad_norm": 0.8948838710784912,
"learning_rate": 6.111111111111112e-05,
"loss": 0.8212,
"step": 12
},
{
"epoch": 0.3368421052631579,
"grad_norm": 0.5042845606803894,
"learning_rate": 8.333333333333334e-05,
"loss": 0.533,
"step": 16
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.5063032507896423,
"learning_rate": 0.00010555555555555557,
"loss": 0.425,
"step": 20
},
{
"epoch": 0.5052631578947369,
"grad_norm": 0.4761255383491516,
"learning_rate": 0.00012777777777777776,
"loss": 0.3875,
"step": 24
},
{
"epoch": 0.5894736842105263,
"grad_norm": 0.4021145701408386,
"learning_rate": 0.00015000000000000001,
"loss": 0.3799,
"step": 28
},
{
"epoch": 0.6736842105263158,
"grad_norm": 0.441383421421051,
"learning_rate": 0.00017222222222222224,
"loss": 0.3438,
"step": 32
},
{
"epoch": 0.7578947368421053,
"grad_norm": 0.3923978805541992,
"learning_rate": 0.00019444444444444446,
"loss": 0.2952,
"step": 36
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.39148256182670593,
"learning_rate": 0.00019999007677495127,
"loss": 0.2766,
"step": 40
},
{
"epoch": 0.9263157894736842,
"grad_norm": 0.3753437101840973,
"learning_rate": 0.0001999459775237086,
"loss": 0.329,
"step": 44
},
{
"epoch": 1.0105263157894737,
"grad_norm": 0.49228230118751526,
"learning_rate": 0.00019986661520865405,
"loss": 0.2808,
"step": 48
},
{
"epoch": 1.0947368421052632,
"grad_norm": 0.3359147608280182,
"learning_rate": 0.00019975201783049805,
"loss": 0.2385,
"step": 52
},
{
"epoch": 1.1789473684210527,
"grad_norm": 0.33956533670425415,
"learning_rate": 0.00019960222582162976,
"loss": 0.224,
"step": 56
},
{
"epoch": 1.263157894736842,
"grad_norm": 0.29228124022483826,
"learning_rate": 0.00019941729203185165,
"loss": 0.2062,
"step": 60
},
{
"epoch": 1.3473684210526315,
"grad_norm": 0.3463740944862366,
"learning_rate": 0.00019919728170973296,
"loss": 0.2335,
"step": 64
},
{
"epoch": 1.431578947368421,
"grad_norm": 0.30409345030784607,
"learning_rate": 0.00019894227247958845,
"loss": 0.2242,
"step": 68
},
{
"epoch": 1.5157894736842106,
"grad_norm": 0.3040483593940735,
"learning_rate": 0.00019865235431409123,
"loss": 0.1707,
"step": 72
},
{
"epoch": 1.6,
"grad_norm": 0.3167271614074707,
"learning_rate": 0.00019832762950252813,
"loss": 0.2288,
"step": 76
},
{
"epoch": 1.6842105263157894,
"grad_norm": 0.4126874804496765,
"learning_rate": 0.00019796821261471018,
"loss": 0.202,
"step": 80
},
{
"epoch": 1.768421052631579,
"grad_norm": 0.3122522830963135,
"learning_rate": 0.00019757423046054968,
"loss": 0.2209,
"step": 84
},
{
"epoch": 1.8526315789473684,
"grad_norm": 0.348960280418396,
"learning_rate": 0.00019714582204531918,
"loss": 0.1551,
"step": 88
},
{
"epoch": 1.936842105263158,
"grad_norm": 0.413482666015625,
"learning_rate": 0.00019668313852060735,
"loss": 0.1818,
"step": 92
},
{
"epoch": 2.0210526315789474,
"grad_norm": 0.2502991259098053,
"learning_rate": 0.00019618634313098952,
"loss": 0.1355,
"step": 96
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.33105117082595825,
"learning_rate": 0.00019565561115643152,
"loss": 0.1286,
"step": 100
},
{
"epoch": 2.1894736842105265,
"grad_norm": 0.3156004548072815,
"learning_rate": 0.00019509112985044717,
"loss": 0.0978,
"step": 104
},
{
"epoch": 2.2736842105263158,
"grad_norm": 0.39305001497268677,
"learning_rate": 0.00019449309837403137,
"loss": 0.1227,
"step": 108
},
{
"epoch": 2.3578947368421055,
"grad_norm": 0.3099610209465027,
"learning_rate": 0.00019386172772539162,
"loss": 0.1346,
"step": 112
},
{
"epoch": 2.442105263157895,
"grad_norm": 0.2809341847896576,
"learning_rate": 0.00019319724066550373,
"loss": 0.1223,
"step": 116
},
{
"epoch": 2.526315789473684,
"grad_norm": 0.40614527463912964,
"learning_rate": 0.00019249987163951667,
"loss": 0.1353,
"step": 120
},
{
"epoch": 2.610526315789474,
"grad_norm": 0.23737777769565582,
"learning_rate": 0.00019176986669403555,
"loss": 0.112,
"step": 124
},
{
"epoch": 2.694736842105263,
"grad_norm": 0.2852821350097656,
"learning_rate": 0.00019100748339031113,
"loss": 0.1349,
"step": 128
},
{
"epoch": 2.7789473684210524,
"grad_norm": 0.43856674432754517,
"learning_rate": 0.00019021299071336664,
"loss": 0.1179,
"step": 132
},
{
"epoch": 2.863157894736842,
"grad_norm": 0.3141747713088989,
"learning_rate": 0.00018938666897709425,
"loss": 0.1306,
"step": 136
},
{
"epoch": 2.9473684210526314,
"grad_norm": 0.2711346447467804,
"learning_rate": 0.00018852880972535432,
"loss": 0.1551,
"step": 140
},
{
"epoch": 3.031578947368421,
"grad_norm": 0.25603431463241577,
"learning_rate": 0.0001876397156291125,
"loss": 0.0794,
"step": 144
},
{
"epoch": 3.1157894736842104,
"grad_norm": 0.42339861392974854,
"learning_rate": 0.00018671970037965118,
"loss": 0.0744,
"step": 148
},
{
"epoch": 3.2,
"grad_norm": 0.2693917751312256,
"learning_rate": 0.0001857690885778923,
"loss": 0.0696,
"step": 152
},
{
"epoch": 3.2842105263157895,
"grad_norm": 0.2737679183483124,
"learning_rate": 0.0001847882156198713,
"loss": 0.0653,
"step": 156
},
{
"epoch": 3.3684210526315788,
"grad_norm": 0.4129018187522888,
"learning_rate": 0.00018377742757840244,
"loss": 0.0806,
"step": 160
},
{
"epoch": 3.4526315789473685,
"grad_norm": 0.3873012065887451,
"learning_rate": 0.00018273708108097677,
"loss": 0.0752,
"step": 164
},
{
"epoch": 3.536842105263158,
"grad_norm": 0.3241969645023346,
"learning_rate": 0.0001816675431839365,
"loss": 0.0772,
"step": 168
},
{
"epoch": 3.6210526315789475,
"grad_norm": 0.3158609867095947,
"learning_rate": 0.0001805691912429696,
"loss": 0.0802,
"step": 172
},
{
"epoch": 3.705263157894737,
"grad_norm": 0.3270516097545624,
"learning_rate": 0.00017944241277997077,
"loss": 0.0712,
"step": 176
},
{
"epoch": 3.7894736842105265,
"grad_norm": 0.30987676978111267,
"learning_rate": 0.00017828760534631565,
"loss": 0.0699,
"step": 180
},
{
"epoch": 3.873684210526316,
"grad_norm": 0.28480827808380127,
"learning_rate": 0.0001771051763825959,
"loss": 0.0858,
"step": 184
},
{
"epoch": 3.957894736842105,
"grad_norm": 0.24550119042396545,
"learning_rate": 0.0001758955430748658,
"loss": 0.0619,
"step": 188
},
{
"epoch": 4.042105263157895,
"grad_norm": 0.3428252041339874,
"learning_rate": 0.00017465913220744998,
"loss": 0.0585,
"step": 192
},
{
"epoch": 4.126315789473685,
"grad_norm": 0.32337552309036255,
"learning_rate": 0.00017339638001236492,
"loss": 0.0518,
"step": 196
},
{
"epoch": 4.2105263157894735,
"grad_norm": 0.2767057716846466,
"learning_rate": 0.00017210773201540707,
"loss": 0.0435,
"step": 200
},
{
"epoch": 4.294736842105263,
"grad_norm": 0.22791585326194763,
"learning_rate": 0.00017079364287896174,
"loss": 0.0471,
"step": 204
},
{
"epoch": 4.378947368421053,
"grad_norm": 0.3095114231109619,
"learning_rate": 0.00016945457624158871,
"loss": 0.0401,
"step": 208
},
{
"epoch": 4.463157894736842,
"grad_norm": 0.2641872465610504,
"learning_rate": 0.0001680910045544406,
"loss": 0.0447,
"step": 212
},
{
"epoch": 4.5473684210526315,
"grad_norm": 0.3095775842666626,
"learning_rate": 0.00016670340891457216,
"loss": 0.052,
"step": 216
},
{
"epoch": 4.631578947368421,
"grad_norm": 0.292422890663147,
"learning_rate": 0.00016529227889519886,
"loss": 0.0439,
"step": 220
},
{
"epoch": 4.715789473684211,
"grad_norm": 0.2715825140476227,
"learning_rate": 0.0001638581123729652,
"loss": 0.039,
"step": 224
},
{
"epoch": 4.8,
"grad_norm": 0.17827194929122925,
"learning_rate": 0.00016240141535228323,
"loss": 0.0424,
"step": 228
},
{
"epoch": 4.88421052631579,
"grad_norm": 0.21712446212768555,
"learning_rate": 0.0001609227017868033,
"loss": 0.0502,
"step": 232
},
{
"epoch": 4.968421052631579,
"grad_norm": 0.45244210958480835,
"learning_rate": 0.00015942249339808058,
"loss": 0.0548,
"step": 236
}
],
"logging_steps": 4,
"max_steps": 705,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.532279741218816e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
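
For quick inspection, the training curve recorded in "log_history" above can be plotted directly from this file. The snippet below is a minimal sketch, not part of the checkpoint itself: it assumes the file has been saved locally as "checkpoint-237/trainer_state.json" and that matplotlib is installed; only the standard json module and matplotlib.pyplot are used.

# Minimal sketch: read trainer_state.json and plot training loss per global step.
# Assumptions: local path "checkpoint-237/trainer_state.json"; matplotlib available.
import json

import matplotlib.pyplot as plt

with open("checkpoint-237/trainer_state.json") as f:
    state = json.load(f)

# Each entry in log_history here carries "step" and "loss" (logged every 4 steps).
steps = [entry["step"] for entry in state["log_history"] if "loss" in entry]
losses = [entry["loss"] for entry in state["log_history"] if "loss" in entry]

plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title(f"Loss logged every {state['logging_steps']} steps (checkpoint-237)")
plt.show()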