{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6012024048096193,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008016032064128256,
"grad_norm": 2.307159185409546,
"learning_rate": 3.3333333333333335e-05,
"loss": 6.425,
"step": 1
},
{
"epoch": 0.008016032064128256,
"eval_loss": 1.1576390266418457,
"eval_runtime": 10.012,
"eval_samples_per_second": 10.487,
"eval_steps_per_second": 5.294,
"step": 1
},
{
"epoch": 0.01603206412825651,
"grad_norm": 2.5824155807495117,
"learning_rate": 6.666666666666667e-05,
"loss": 6.4417,
"step": 2
},
{
"epoch": 0.02404809619238477,
"grad_norm": 3.396984577178955,
"learning_rate": 0.0001,
"loss": 8.1689,
"step": 3
},
{
"epoch": 0.03206412825651302,
"grad_norm": 2.59348726272583,
"learning_rate": 9.99524110790929e-05,
"loss": 7.313,
"step": 4
},
{
"epoch": 0.04008016032064128,
"grad_norm": 2.7657885551452637,
"learning_rate": 9.980973490458728e-05,
"loss": 8.3049,
"step": 5
},
{
"epoch": 0.04809619238476954,
"grad_norm": 2.9509098529815674,
"learning_rate": 9.957224306869053e-05,
"loss": 7.3503,
"step": 6
},
{
"epoch": 0.056112224448897796,
"grad_norm": 2.893247604370117,
"learning_rate": 9.924038765061042e-05,
"loss": 6.8023,
"step": 7
},
{
"epoch": 0.06412825651302605,
"grad_norm": 2.8917412757873535,
"learning_rate": 9.881480035599667e-05,
"loss": 7.3582,
"step": 8
},
{
"epoch": 0.07214428857715431,
"grad_norm": 3.6936254501342773,
"learning_rate": 9.829629131445342e-05,
"loss": 7.792,
"step": 9
},
{
"epoch": 0.08016032064128256,
"grad_norm": 3.8342223167419434,
"learning_rate": 9.768584753741134e-05,
"loss": 7.6313,
"step": 10
},
{
"epoch": 0.08817635270541083,
"grad_norm": 3.553842544555664,
"learning_rate": 9.698463103929542e-05,
"loss": 7.9785,
"step": 11
},
{
"epoch": 0.09619238476953908,
"grad_norm": 3.0366082191467285,
"learning_rate": 9.619397662556435e-05,
"loss": 7.9767,
"step": 12
},
{
"epoch": 0.10420841683366733,
"grad_norm": 3.304401159286499,
"learning_rate": 9.53153893518325e-05,
"loss": 7.4045,
"step": 13
},
{
"epoch": 0.11222444889779559,
"grad_norm": 3.1661627292633057,
"learning_rate": 9.435054165891109e-05,
"loss": 7.6905,
"step": 14
},
{
"epoch": 0.12024048096192384,
"grad_norm": 3.185795545578003,
"learning_rate": 9.330127018922194e-05,
"loss": 7.6014,
"step": 15
},
{
"epoch": 0.1282565130260521,
"grad_norm": 3.3107285499572754,
"learning_rate": 9.21695722906443e-05,
"loss": 7.9431,
"step": 16
},
{
"epoch": 0.13627254509018036,
"grad_norm": 2.93304181098938,
"learning_rate": 9.09576022144496e-05,
"loss": 7.275,
"step": 17
},
{
"epoch": 0.14428857715430862,
"grad_norm": 3.08390212059021,
"learning_rate": 8.966766701456177e-05,
"loss": 7.9457,
"step": 18
},
{
"epoch": 0.1523046092184369,
"grad_norm": 3.381080150604248,
"learning_rate": 8.83022221559489e-05,
"loss": 8.2009,
"step": 19
},
{
"epoch": 0.16032064128256512,
"grad_norm": 5.137476444244385,
"learning_rate": 8.68638668405062e-05,
"loss": 8.2093,
"step": 20
},
{
"epoch": 0.1683366733466934,
"grad_norm": 3.8344078063964844,
"learning_rate": 8.535533905932738e-05,
"loss": 8.4002,
"step": 21
},
{
"epoch": 0.17635270541082165,
"grad_norm": 3.9177253246307373,
"learning_rate": 8.377951038078302e-05,
"loss": 8.1867,
"step": 22
},
{
"epoch": 0.1843687374749499,
"grad_norm": 5.506999492645264,
"learning_rate": 8.213938048432697e-05,
"loss": 8.3518,
"step": 23
},
{
"epoch": 0.19238476953907815,
"grad_norm": 4.127420902252197,
"learning_rate": 8.043807145043604e-05,
"loss": 8.9159,
"step": 24
},
{
"epoch": 0.20040080160320642,
"grad_norm": 4.1928229331970215,
"learning_rate": 7.86788218175523e-05,
"loss": 8.2144,
"step": 25
},
{
"epoch": 0.20040080160320642,
"eval_loss": 0.9458172917366028,
"eval_runtime": 10.0465,
"eval_samples_per_second": 10.451,
"eval_steps_per_second": 5.275,
"step": 25
},
{
"epoch": 0.20841683366733466,
"grad_norm": 4.462054252624512,
"learning_rate": 7.68649804173412e-05,
"loss": 8.5793,
"step": 26
},
{
"epoch": 0.21643286573146292,
"grad_norm": 4.8768463134765625,
"learning_rate": 7.500000000000001e-05,
"loss": 8.9825,
"step": 27
},
{
"epoch": 0.22444889779559118,
"grad_norm": 4.757332801818848,
"learning_rate": 7.308743066175172e-05,
"loss": 8.4356,
"step": 28
},
{
"epoch": 0.23246492985971945,
"grad_norm": 4.838624477386475,
"learning_rate": 7.113091308703498e-05,
"loss": 8.3826,
"step": 29
},
{
"epoch": 0.24048096192384769,
"grad_norm": 5.770003318786621,
"learning_rate": 6.91341716182545e-05,
"loss": 8.4361,
"step": 30
},
{
"epoch": 0.24849699398797595,
"grad_norm": 7.8174028396606445,
"learning_rate": 6.710100716628344e-05,
"loss": 8.9972,
"step": 31
},
{
"epoch": 0.2565130260521042,
"grad_norm": 2.0084359645843506,
"learning_rate": 6.503528997521366e-05,
"loss": 5.4369,
"step": 32
},
{
"epoch": 0.26452905811623245,
"grad_norm": 2.1963016986846924,
"learning_rate": 6.294095225512603e-05,
"loss": 6.623,
"step": 33
},
{
"epoch": 0.2725450901803607,
"grad_norm": 2.6279807090759277,
"learning_rate": 6.0821980696905146e-05,
"loss": 6.992,
"step": 34
},
{
"epoch": 0.280561122244489,
"grad_norm": 2.3220574855804443,
"learning_rate": 5.868240888334653e-05,
"loss": 7.2082,
"step": 35
},
{
"epoch": 0.28857715430861725,
"grad_norm": 2.368709087371826,
"learning_rate": 5.6526309611002594e-05,
"loss": 7.3801,
"step": 36
},
{
"epoch": 0.2965931863727455,
"grad_norm": 2.547515869140625,
"learning_rate": 5.435778713738292e-05,
"loss": 7.3293,
"step": 37
},
{
"epoch": 0.3046092184368738,
"grad_norm": 2.321802854537964,
"learning_rate": 5.218096936826681e-05,
"loss": 6.902,
"step": 38
},
{
"epoch": 0.312625250501002,
"grad_norm": 2.3677818775177,
"learning_rate": 5e-05,
"loss": 7.6134,
"step": 39
},
{
"epoch": 0.32064128256513025,
"grad_norm": 2.6523196697235107,
"learning_rate": 4.781903063173321e-05,
"loss": 6.538,
"step": 40
},
{
"epoch": 0.3286573146292585,
"grad_norm": 2.4183263778686523,
"learning_rate": 4.564221286261709e-05,
"loss": 7.0564,
"step": 41
},
{
"epoch": 0.3366733466933868,
"grad_norm": 2.484513282775879,
"learning_rate": 4.347369038899744e-05,
"loss": 7.5796,
"step": 42
},
{
"epoch": 0.34468937875751504,
"grad_norm": 2.533151149749756,
"learning_rate": 4.131759111665349e-05,
"loss": 6.9521,
"step": 43
},
{
"epoch": 0.3527054108216433,
"grad_norm": 2.687480926513672,
"learning_rate": 3.917801930309486e-05,
"loss": 7.8218,
"step": 44
},
{
"epoch": 0.36072144288577157,
"grad_norm": 2.9025418758392334,
"learning_rate": 3.705904774487396e-05,
"loss": 8.115,
"step": 45
},
{
"epoch": 0.3687374749498998,
"grad_norm": 3.0086724758148193,
"learning_rate": 3.4964710024786354e-05,
"loss": 7.9331,
"step": 46
},
{
"epoch": 0.37675350701402804,
"grad_norm": 3.0804407596588135,
"learning_rate": 3.289899283371657e-05,
"loss": 7.3887,
"step": 47
},
{
"epoch": 0.3847695390781563,
"grad_norm": 2.9071097373962402,
"learning_rate": 3.086582838174551e-05,
"loss": 7.5355,
"step": 48
},
{
"epoch": 0.3927855711422846,
"grad_norm": 3.1507182121276855,
"learning_rate": 2.886908691296504e-05,
"loss": 8.1586,
"step": 49
},
{
"epoch": 0.40080160320641284,
"grad_norm": 3.268674850463867,
"learning_rate": 2.6912569338248315e-05,
"loss": 7.529,
"step": 50
},
{
"epoch": 0.40080160320641284,
"eval_loss": 0.9151780009269714,
"eval_runtime": 10.0555,
"eval_samples_per_second": 10.442,
"eval_steps_per_second": 5.271,
"step": 50
},
{
"epoch": 0.4088176352705411,
"grad_norm": 3.3615987300872803,
"learning_rate": 2.500000000000001e-05,
"loss": 7.638,
"step": 51
},
{
"epoch": 0.4168336673346693,
"grad_norm": 3.4315879344940186,
"learning_rate": 2.3135019582658802e-05,
"loss": 7.6535,
"step": 52
},
{
"epoch": 0.4248496993987976,
"grad_norm": 3.4674675464630127,
"learning_rate": 2.132117818244771e-05,
"loss": 7.576,
"step": 53
},
{
"epoch": 0.43286573146292584,
"grad_norm": 4.074899196624756,
"learning_rate": 1.9561928549563968e-05,
"loss": 8.2586,
"step": 54
},
{
"epoch": 0.4408817635270541,
"grad_norm": 3.7496204376220703,
"learning_rate": 1.7860619515673033e-05,
"loss": 8.2481,
"step": 55
},
{
"epoch": 0.44889779559118237,
"grad_norm": 4.384031295776367,
"learning_rate": 1.622048961921699e-05,
"loss": 8.9745,
"step": 56
},
{
"epoch": 0.45691382765531063,
"grad_norm": 4.619762897491455,
"learning_rate": 1.4644660940672627e-05,
"loss": 8.3918,
"step": 57
},
{
"epoch": 0.4649298597194389,
"grad_norm": 4.338256359100342,
"learning_rate": 1.3136133159493802e-05,
"loss": 7.4605,
"step": 58
},
{
"epoch": 0.4729458917835671,
"grad_norm": 4.962979316711426,
"learning_rate": 1.1697777844051105e-05,
"loss": 8.278,
"step": 59
},
{
"epoch": 0.48096192384769537,
"grad_norm": 5.241549968719482,
"learning_rate": 1.0332332985438248e-05,
"loss": 8.4689,
"step": 60
},
{
"epoch": 0.48897795591182364,
"grad_norm": 5.300624847412109,
"learning_rate": 9.042397785550405e-06,
"loss": 9.2895,
"step": 61
},
{
"epoch": 0.4969939879759519,
"grad_norm": 6.585608005523682,
"learning_rate": 7.830427709355725e-06,
"loss": 7.982,
"step": 62
},
{
"epoch": 0.5050100200400801,
"grad_norm": 1.4862316846847534,
"learning_rate": 6.698729810778065e-06,
"loss": 5.5931,
"step": 63
},
{
"epoch": 0.5130260521042084,
"grad_norm": 1.845563530921936,
"learning_rate": 5.649458341088915e-06,
"loss": 6.426,
"step": 64
},
{
"epoch": 0.5210420841683366,
"grad_norm": 1.8862559795379639,
"learning_rate": 4.684610648167503e-06,
"loss": 6.5485,
"step": 65
},
{
"epoch": 0.5290581162324649,
"grad_norm": 1.9703049659729004,
"learning_rate": 3.8060233744356633e-06,
"loss": 7.2592,
"step": 66
},
{
"epoch": 0.5370741482965932,
"grad_norm": 1.9892781972885132,
"learning_rate": 3.0153689607045845e-06,
"loss": 6.0312,
"step": 67
},
{
"epoch": 0.5450901803607214,
"grad_norm": 2.151519775390625,
"learning_rate": 2.314152462588659e-06,
"loss": 6.933,
"step": 68
},
{
"epoch": 0.5531062124248497,
"grad_norm": 2.203801155090332,
"learning_rate": 1.70370868554659e-06,
"loss": 6.3505,
"step": 69
},
{
"epoch": 0.561122244488978,
"grad_norm": 2.3427622318267822,
"learning_rate": 1.1851996440033319e-06,
"loss": 6.4348,
"step": 70
},
{
"epoch": 0.5691382765531062,
"grad_norm": 2.671212911605835,
"learning_rate": 7.596123493895991e-07,
"loss": 7.2945,
"step": 71
},
{
"epoch": 0.5771543086172345,
"grad_norm": 2.542541742324829,
"learning_rate": 4.277569313094809e-07,
"loss": 7.3814,
"step": 72
},
{
"epoch": 0.5851703406813628,
"grad_norm": 2.441311836242676,
"learning_rate": 1.9026509541272275e-07,
"loss": 6.5333,
"step": 73
},
{
"epoch": 0.593186372745491,
"grad_norm": 2.626361608505249,
"learning_rate": 4.7588920907110094e-08,
"loss": 6.1486,
"step": 74
},
{
"epoch": 0.6012024048096193,
"grad_norm": 2.8685286045074463,
"learning_rate": 0.0,
"loss": 7.3685,
"step": 75
},
{
"epoch": 0.6012024048096193,
"eval_loss": 0.9051383137702942,
"eval_runtime": 10.0437,
"eval_samples_per_second": 10.454,
"eval_steps_per_second": 5.277,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.893690859533107e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}