{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.20128824476650564,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002012882447665056,
"grad_norm": 22.921390533447266,
"learning_rate": 6.5e-06,
"loss": 1.3427,
"step": 1
},
{
"epoch": 0.004025764895330112,
"grad_norm": 14.772722244262695,
"learning_rate": 1.3e-05,
"loss": 1.3647,
"step": 2
},
{
"epoch": 0.006038647342995169,
"grad_norm": 11.458742141723633,
"learning_rate": 1.9499999999999996e-05,
"loss": 1.2841,
"step": 3
},
{
"epoch": 0.008051529790660225,
"grad_norm": 4.747677326202393,
"learning_rate": 2.6e-05,
"loss": 1.1644,
"step": 4
},
{
"epoch": 0.010064412238325281,
"grad_norm": 3.687121629714966,
"learning_rate": 3.25e-05,
"loss": 1.1084,
"step": 5
},
{
"epoch": 0.012077294685990338,
"grad_norm": 3.178232431411743,
"learning_rate": 3.899999999999999e-05,
"loss": 1.0753,
"step": 6
},
{
"epoch": 0.014090177133655395,
"grad_norm": 2.476033926010132,
"learning_rate": 4.5499999999999995e-05,
"loss": 1.075,
"step": 7
},
{
"epoch": 0.01610305958132045,
"grad_norm": 1.9162139892578125,
"learning_rate": 5.2e-05,
"loss": 1.0195,
"step": 8
},
{
"epoch": 0.018115942028985508,
"grad_norm": 1.797377109527588,
"learning_rate": 5.85e-05,
"loss": 0.9884,
"step": 9
},
{
"epoch": 0.020128824476650563,
"grad_norm": 1.816100835800171,
"learning_rate": 6.5e-05,
"loss": 0.9945,
"step": 10
},
{
"epoch": 0.02214170692431562,
"grad_norm": 1.843664526939392,
"learning_rate": 6.499932098548219e-05,
"loss": 0.8802,
"step": 11
},
{
"epoch": 0.024154589371980676,
"grad_norm": 1.7202250957489014,
"learning_rate": 6.49972839703017e-05,
"loss": 0.9833,
"step": 12
},
{
"epoch": 0.026167471819645734,
"grad_norm": 1.4981484413146973,
"learning_rate": 6.499388903957628e-05,
"loss": 0.9481,
"step": 13
},
{
"epoch": 0.02818035426731079,
"grad_norm": 1.4716213941574097,
"learning_rate": 6.498913633516483e-05,
"loss": 0.8809,
"step": 14
},
{
"epoch": 0.030193236714975844,
"grad_norm": 1.3817026615142822,
"learning_rate": 6.498302605566152e-05,
"loss": 0.9264,
"step": 15
},
{
"epoch": 0.0322061191626409,
"grad_norm": 1.509097695350647,
"learning_rate": 6.497555845638748e-05,
"loss": 0.8829,
"step": 16
},
{
"epoch": 0.03421900161030596,
"grad_norm": 1.5498337745666504,
"learning_rate": 6.496673384938014e-05,
"loss": 0.9227,
"step": 17
},
{
"epoch": 0.036231884057971016,
"grad_norm": 1.527828574180603,
"learning_rate": 6.49565526033802e-05,
"loss": 0.9488,
"step": 18
},
{
"epoch": 0.038244766505636074,
"grad_norm": 1.4578592777252197,
"learning_rate": 6.494501514381621e-05,
"loss": 0.8845,
"step": 19
},
{
"epoch": 0.040257648953301126,
"grad_norm": 1.4198046922683716,
"learning_rate": 6.493212195278678e-05,
"loss": 0.8809,
"step": 20
},
{
"epoch": 0.042270531400966184,
"grad_norm": 1.4455400705337524,
"learning_rate": 6.491787356904047e-05,
"loss": 0.8686,
"step": 21
},
{
"epoch": 0.04428341384863124,
"grad_norm": 1.5711616277694702,
"learning_rate": 6.490227058795323e-05,
"loss": 0.96,
"step": 22
},
{
"epoch": 0.046296296296296294,
"grad_norm": 1.4603729248046875,
"learning_rate": 6.488531366150359e-05,
"loss": 0.9026,
"step": 23
},
{
"epoch": 0.04830917874396135,
"grad_norm": 1.4184608459472656,
"learning_rate": 6.48670034982453e-05,
"loss": 0.9426,
"step": 24
},
{
"epoch": 0.05032206119162641,
"grad_norm": 1.4080291986465454,
"learning_rate": 6.484734086327788e-05,
"loss": 0.8857,
"step": 25
},
{
"epoch": 0.05233494363929147,
"grad_norm": 1.4739091396331787,
"learning_rate": 6.482632657821454e-05,
"loss": 0.8897,
"step": 26
},
{
"epoch": 0.05434782608695652,
"grad_norm": 1.320746898651123,
"learning_rate": 6.480396152114787e-05,
"loss": 0.8512,
"step": 27
},
{
"epoch": 0.05636070853462158,
"grad_norm": 1.4142905473709106,
"learning_rate": 6.478024662661315e-05,
"loss": 0.9459,
"step": 28
},
{
"epoch": 0.05837359098228664,
"grad_norm": 1.3830432891845703,
"learning_rate": 6.47551828855493e-05,
"loss": 0.8605,
"step": 29
},
{
"epoch": 0.06038647342995169,
"grad_norm": 1.3653528690338135,
"learning_rate": 6.472877134525753e-05,
"loss": 0.8446,
"step": 30
},
{
"epoch": 0.06239935587761675,
"grad_norm": 1.3041807413101196,
"learning_rate": 6.470101310935746e-05,
"loss": 0.8407,
"step": 31
},
{
"epoch": 0.0644122383252818,
"grad_norm": 1.3803820610046387,
"learning_rate": 6.467190933774112e-05,
"loss": 0.8505,
"step": 32
},
{
"epoch": 0.06642512077294686,
"grad_norm": 1.3407087326049805,
"learning_rate": 6.464146124652441e-05,
"loss": 0.794,
"step": 33
},
{
"epoch": 0.06843800322061191,
"grad_norm": 1.4283268451690674,
"learning_rate": 6.460967010799629e-05,
"loss": 0.9139,
"step": 34
},
{
"epoch": 0.07045088566827697,
"grad_norm": 1.3512784242630005,
"learning_rate": 6.457653725056568e-05,
"loss": 0.807,
"step": 35
},
{
"epoch": 0.07246376811594203,
"grad_norm": 1.3088512420654297,
"learning_rate": 6.454206405870587e-05,
"loss": 0.8968,
"step": 36
},
{
"epoch": 0.07447665056360708,
"grad_norm": 1.2908034324645996,
"learning_rate": 6.450625197289675e-05,
"loss": 0.8447,
"step": 37
},
{
"epoch": 0.07648953301127215,
"grad_norm": 1.3444992303848267,
"learning_rate": 6.446910248956453e-05,
"loss": 0.8891,
"step": 38
},
{
"epoch": 0.0785024154589372,
"grad_norm": 1.2567275762557983,
"learning_rate": 6.443061716101926e-05,
"loss": 0.8134,
"step": 39
},
{
"epoch": 0.08051529790660225,
"grad_norm": 1.3401930332183838,
"learning_rate": 6.439079759539e-05,
"loss": 0.8751,
"step": 40
},
{
"epoch": 0.08252818035426732,
"grad_norm": 1.3348793983459473,
"learning_rate": 6.434964545655754e-05,
"loss": 0.7942,
"step": 41
},
{
"epoch": 0.08454106280193237,
"grad_norm": 1.311357021331787,
"learning_rate": 6.430716246408493e-05,
"loss": 0.8461,
"step": 42
},
{
"epoch": 0.08655394524959742,
"grad_norm": 1.3319345712661743,
"learning_rate": 6.426335039314566e-05,
"loss": 0.8821,
"step": 43
},
{
"epoch": 0.08856682769726248,
"grad_norm": 1.304795742034912,
"learning_rate": 6.421821107444936e-05,
"loss": 0.8655,
"step": 44
},
{
"epoch": 0.09057971014492754,
"grad_norm": 1.422869086265564,
"learning_rate": 6.417174639416547e-05,
"loss": 0.9261,
"step": 45
},
{
"epoch": 0.09259259259259259,
"grad_norm": 1.2931973934173584,
"learning_rate": 6.412395829384427e-05,
"loss": 0.8157,
"step": 46
},
{
"epoch": 0.09460547504025765,
"grad_norm": 1.2984646558761597,
"learning_rate": 6.407484877033586e-05,
"loss": 0.7919,
"step": 47
},
{
"epoch": 0.0966183574879227,
"grad_norm": 1.2596486806869507,
"learning_rate": 6.402441987570665e-05,
"loss": 0.7682,
"step": 48
},
{
"epoch": 0.09863123993558776,
"grad_norm": 1.3544796705245972,
"learning_rate": 6.397267371715368e-05,
"loss": 0.9084,
"step": 49
},
{
"epoch": 0.10064412238325282,
"grad_norm": 1.2623202800750732,
"learning_rate": 6.391961245691652e-05,
"loss": 0.8482,
"step": 50
},
{
"epoch": 0.10265700483091787,
"grad_norm": 1.320026159286499,
"learning_rate": 6.386523831218689e-05,
"loss": 0.8436,
"step": 51
},
{
"epoch": 0.10466988727858294,
"grad_norm": 1.4064967632293701,
"learning_rate": 6.38095535550161e-05,
"loss": 0.8962,
"step": 52
},
{
"epoch": 0.10668276972624799,
"grad_norm": 1.3178116083145142,
"learning_rate": 6.375256051222009e-05,
"loss": 0.8064,
"step": 53
},
{
"epoch": 0.10869565217391304,
"grad_norm": 1.2729371786117554,
"learning_rate": 6.369426156528213e-05,
"loss": 0.8338,
"step": 54
},
{
"epoch": 0.1107085346215781,
"grad_norm": 1.2167425155639648,
"learning_rate": 6.363465915025339e-05,
"loss": 0.8078,
"step": 55
},
{
"epoch": 0.11272141706924316,
"grad_norm": 1.3006845712661743,
"learning_rate": 6.357375575765108e-05,
"loss": 0.8462,
"step": 56
},
{
"epoch": 0.11473429951690821,
"grad_norm": 1.245822787284851,
"learning_rate": 6.351155393235449e-05,
"loss": 0.8394,
"step": 57
},
{
"epoch": 0.11674718196457327,
"grad_norm": 1.2608625888824463,
"learning_rate": 6.34480562734985e-05,
"loss": 0.8606,
"step": 58
},
{
"epoch": 0.11876006441223833,
"grad_norm": 1.2614774703979492,
"learning_rate": 6.33832654343651e-05,
"loss": 0.7633,
"step": 59
},
{
"epoch": 0.12077294685990338,
"grad_norm": 1.3382893800735474,
"learning_rate": 6.331718412227247e-05,
"loss": 0.8514,
"step": 60
},
{
"epoch": 0.12278582930756844,
"grad_norm": 1.2239303588867188,
"learning_rate": 6.324981509846189e-05,
"loss": 0.8319,
"step": 61
},
{
"epoch": 0.1247987117552335,
"grad_norm": 1.3828685283660889,
"learning_rate": 6.318116117798225e-05,
"loss": 0.8683,
"step": 62
},
{
"epoch": 0.12681159420289856,
"grad_norm": 1.304027795791626,
"learning_rate": 6.311122522957255e-05,
"loss": 0.887,
"step": 63
},
{
"epoch": 0.1288244766505636,
"grad_norm": 1.3097902536392212,
"learning_rate": 6.304001017554202e-05,
"loss": 0.8636,
"step": 64
},
{
"epoch": 0.13083735909822866,
"grad_norm": 1.281269907951355,
"learning_rate": 6.296751899164788e-05,
"loss": 0.7537,
"step": 65
},
{
"epoch": 0.13285024154589373,
"grad_norm": 1.2349010705947876,
"learning_rate": 6.289375470697116e-05,
"loss": 0.8451,
"step": 66
},
{
"epoch": 0.13486312399355876,
"grad_norm": 1.2939257621765137,
"learning_rate": 6.281872040379001e-05,
"loss": 0.8249,
"step": 67
},
{
"epoch": 0.13687600644122383,
"grad_norm": 1.308341145515442,
"learning_rate": 6.274241921745097e-05,
"loss": 0.8764,
"step": 68
},
{
"epoch": 0.1388888888888889,
"grad_norm": 1.311805009841919,
"learning_rate": 6.266485433623794e-05,
"loss": 0.8308,
"step": 69
},
{
"epoch": 0.14090177133655393,
"grad_norm": 1.6127303838729858,
"learning_rate": 6.258602900123894e-05,
"loss": 0.8069,
"step": 70
},
{
"epoch": 0.142914653784219,
"grad_norm": 1.292090654373169,
"learning_rate": 6.250594650621067e-05,
"loss": 0.8174,
"step": 71
},
{
"epoch": 0.14492753623188406,
"grad_norm": 1.256778359413147,
"learning_rate": 6.2424610197441e-05,
"loss": 0.7956,
"step": 72
},
{
"epoch": 0.1469404186795491,
"grad_norm": 1.2579149007797241,
"learning_rate": 6.23420234736089e-05,
"loss": 0.8267,
"step": 73
},
{
"epoch": 0.14895330112721417,
"grad_norm": 1.3226171731948853,
"learning_rate": 6.225818978564269e-05,
"loss": 0.9322,
"step": 74
},
{
"epoch": 0.15096618357487923,
"grad_norm": 1.1891167163848877,
"learning_rate": 6.217311263657565e-05,
"loss": 0.7886,
"step": 75
},
{
"epoch": 0.1529790660225443,
"grad_norm": 1.2481876611709595,
"learning_rate": 6.208679558139977e-05,
"loss": 0.7832,
"step": 76
},
{
"epoch": 0.15499194847020933,
"grad_norm": 1.2592449188232422,
"learning_rate": 6.199924222691707e-05,
"loss": 0.8061,
"step": 77
},
{
"epoch": 0.1570048309178744,
"grad_norm": 1.250359058380127,
"learning_rate": 6.191045623158907e-05,
"loss": 0.7942,
"step": 78
},
{
"epoch": 0.15901771336553946,
"grad_norm": 1.2406524419784546,
"learning_rate": 6.182044130538373e-05,
"loss": 0.7766,
"step": 79
},
{
"epoch": 0.1610305958132045,
"grad_norm": 1.2620049715042114,
"learning_rate": 6.172920120962052e-05,
"loss": 0.8445,
"step": 80
},
{
"epoch": 0.16304347826086957,
"grad_norm": 1.2618622779846191,
"learning_rate": 6.16367397568133e-05,
"loss": 0.812,
"step": 81
},
{
"epoch": 0.16505636070853463,
"grad_norm": 1.2560168504714966,
"learning_rate": 6.15430608105109e-05,
"loss": 0.8331,
"step": 82
},
{
"epoch": 0.16706924315619967,
"grad_norm": 1.2386751174926758,
"learning_rate": 6.144816828513576e-05,
"loss": 0.7318,
"step": 83
},
{
"epoch": 0.16908212560386474,
"grad_norm": 1.3558485507965088,
"learning_rate": 6.135206614582031e-05,
"loss": 0.8476,
"step": 84
},
{
"epoch": 0.1710950080515298,
"grad_norm": 1.2461968660354614,
"learning_rate": 6.125475840824137e-05,
"loss": 0.8012,
"step": 85
},
{
"epoch": 0.17310789049919484,
"grad_norm": 1.2398920059204102,
"learning_rate": 6.115624913845225e-05,
"loss": 0.8033,
"step": 86
},
{
"epoch": 0.1751207729468599,
"grad_norm": 1.3263791799545288,
"learning_rate": 6.105654245271292e-05,
"loss": 0.8511,
"step": 87
},
{
"epoch": 0.17713365539452497,
"grad_norm": 1.2072999477386475,
"learning_rate": 6.095564251731795e-05,
"loss": 0.7478,
"step": 88
},
{
"epoch": 0.17914653784219,
"grad_norm": 1.287801742553711,
"learning_rate": 6.085355354842249e-05,
"loss": 0.8152,
"step": 89
},
{
"epoch": 0.18115942028985507,
"grad_norm": 1.2826151847839355,
"learning_rate": 6.0750279811866046e-05,
"loss": 0.7918,
"step": 90
},
{
"epoch": 0.18317230273752014,
"grad_norm": 1.276780605316162,
"learning_rate": 6.0645825622994254e-05,
"loss": 0.8774,
"step": 91
},
{
"epoch": 0.18518518518518517,
"grad_norm": 1.2205020189285278,
"learning_rate": 6.0540195346478515e-05,
"loss": 0.8398,
"step": 92
},
{
"epoch": 0.18719806763285024,
"grad_norm": 1.2247477769851685,
"learning_rate": 6.043339339613371e-05,
"loss": 0.8322,
"step": 93
},
{
"epoch": 0.1892109500805153,
"grad_norm": 1.3282443284988403,
"learning_rate": 6.0325424234733656e-05,
"loss": 0.8003,
"step": 94
},
{
"epoch": 0.19122383252818034,
"grad_norm": 1.3472024202346802,
"learning_rate": 6.0216292373824696e-05,
"loss": 0.8305,
"step": 95
},
{
"epoch": 0.1932367149758454,
"grad_norm": 1.2812607288360596,
"learning_rate": 6.010600237353716e-05,
"loss": 0.7657,
"step": 96
},
{
"epoch": 0.19524959742351047,
"grad_norm": 1.2958053350448608,
"learning_rate": 5.999455884239483e-05,
"loss": 0.869,
"step": 97
},
{
"epoch": 0.1972624798711755,
"grad_norm": 1.2021034955978394,
"learning_rate": 5.988196643712235e-05,
"loss": 0.7798,
"step": 98
},
{
"epoch": 0.19927536231884058,
"grad_norm": 1.2430065870285034,
"learning_rate": 5.976822986245067e-05,
"loss": 0.7496,
"step": 99
},
{
"epoch": 0.20128824476650564,
"grad_norm": 1.2749730348587036,
"learning_rate": 5.9653353870920434e-05,
"loss": 0.831,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 496,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.189794361882706e+17,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}