TR_QUIZ_GEN_SIMPLE_LLAMA13B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.995708154506438,
"eval_steps": 500,
"global_step": 349,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 3.838202126167235,
"learning_rate": 9.997135147120633e-05,
"loss": 2.1255,
"step": 5
},
{
"epoch": 0.09,
"grad_norm": 2.1177204859561205,
"learning_rate": 9.98854387143534e-05,
"loss": 1.7295,
"step": 10
},
{
"epoch": 0.13,
"grad_norm": 1.4704605855835866,
"learning_rate": 9.974236018040474e-05,
"loss": 1.4756,
"step": 15
},
{
"epoch": 0.17,
"grad_norm": 1.4984947152628136,
"learning_rate": 9.954227982894034e-05,
"loss": 1.3201,
"step": 20
},
{
"epoch": 0.21,
"grad_norm": 1.3923423053547814,
"learning_rate": 9.928542694026862e-05,
"loss": 1.28,
"step": 25
},
{
"epoch": 0.26,
"grad_norm": 1.3739605464965448,
"learning_rate": 9.897209585268458e-05,
"loss": 1.2543,
"step": 30
},
{
"epoch": 0.3,
"grad_norm": 1.275550734636967,
"learning_rate": 9.86026456251757e-05,
"loss": 1.2261,
"step": 35
},
{
"epoch": 0.34,
"grad_norm": 1.3278212210523785,
"learning_rate": 9.817749962596115e-05,
"loss": 1.1967,
"step": 40
},
{
"epoch": 0.39,
"grad_norm": 1.407416916788669,
"learning_rate": 9.769714504733694e-05,
"loss": 1.1876,
"step": 45
},
{
"epoch": 0.43,
"grad_norm": 1.4541097126165785,
"learning_rate": 9.716213234738215e-05,
"loss": 1.1845,
"step": 50
},
{
"epoch": 0.47,
"grad_norm": 1.4706662049445451,
"learning_rate": 9.657307461916635e-05,
"loss": 1.1681,
"step": 55
},
{
"epoch": 0.52,
"grad_norm": 1.3719130683126355,
"learning_rate": 9.59306468881811e-05,
"loss": 1.1622,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 1.3153375215490886,
"learning_rate": 9.52355853388003e-05,
"loss": 1.1298,
"step": 65
},
{
"epoch": 0.6,
"grad_norm": 1.488930807431193,
"learning_rate": 9.448868647065642e-05,
"loss": 1.1598,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 1.2649466524851178,
"learning_rate": 9.369080618589864e-05,
"loss": 1.135,
"step": 75
},
{
"epoch": 0.69,
"grad_norm": 1.4161938356640078,
"learning_rate": 9.284285880837946e-05,
"loss": 1.1325,
"step": 80
},
{
"epoch": 0.73,
"grad_norm": 1.418651947979571,
"learning_rate": 9.194581603589328e-05,
"loss": 1.1078,
"step": 85
},
{
"epoch": 0.77,
"grad_norm": 1.2926106311909347,
"learning_rate": 9.100070582666795e-05,
"loss": 1.1134,
"step": 90
},
{
"epoch": 0.82,
"grad_norm": 1.387359204219206,
"learning_rate": 9.000861122138517e-05,
"loss": 1.0808,
"step": 95
},
{
"epoch": 0.86,
"grad_norm": 1.5961791185416505,
"learning_rate": 8.897066910207958e-05,
"loss": 1.0832,
"step": 100
},
{
"epoch": 0.9,
"grad_norm": 1.476620650207239,
"learning_rate": 8.788806888933881e-05,
"loss": 1.0843,
"step": 105
},
{
"epoch": 0.94,
"grad_norm": 1.270735111137924,
"learning_rate": 8.676205117929752e-05,
"loss": 1.1029,
"step": 110
},
{
"epoch": 0.99,
"grad_norm": 1.425141501784203,
"learning_rate": 8.559390632198723e-05,
"loss": 1.101,
"step": 115
},
{
"epoch": 1.0,
"eval_loss": 1.1002874374389648,
"eval_runtime": 69.0354,
"eval_samples_per_second": 4.346,
"eval_steps_per_second": 0.55,
"step": 116
},
{
"epoch": 1.03,
"grad_norm": 1.1791383340555186,
"learning_rate": 8.438497294267117e-05,
"loss": 1.0411,
"step": 120
},
{
"epoch": 1.07,
"grad_norm": 1.333740137623119,
"learning_rate": 8.313663640785839e-05,
"loss": 1.0033,
"step": 125
},
{
"epoch": 1.12,
"grad_norm": 1.4447991614503835,
"learning_rate": 8.185032723775539e-05,
"loss": 1.035,
"step": 130
},
{
"epoch": 1.16,
"grad_norm": 1.2844125525377348,
"learning_rate": 8.052751946697403e-05,
"loss": 0.9736,
"step": 135
},
{
"epoch": 1.2,
"grad_norm": 1.2700768647818643,
"learning_rate": 7.916972895537471e-05,
"loss": 1.0296,
"step": 140
},
{
"epoch": 1.24,
"grad_norm": 1.3154218666274633,
"learning_rate": 7.777851165098012e-05,
"loss": 1.0291,
"step": 145
},
{
"epoch": 1.29,
"grad_norm": 1.1964311130128484,
"learning_rate": 7.635546180695038e-05,
"loss": 0.9498,
"step": 150
},
{
"epoch": 1.33,
"grad_norm": 1.5080490468085683,
"learning_rate": 7.490221015466279e-05,
"loss": 0.9916,
"step": 155
},
{
"epoch": 1.37,
"grad_norm": 1.4371536477549405,
"learning_rate": 7.342042203498951e-05,
"loss": 1.0462,
"step": 160
},
{
"epoch": 1.42,
"grad_norm": 1.633026684473331,
"learning_rate": 7.191179548991507e-05,
"loss": 0.9906,
"step": 165
},
{
"epoch": 1.46,
"grad_norm": 1.5739785726895548,
"learning_rate": 7.037805931668005e-05,
"loss": 1.0287,
"step": 170
},
{
"epoch": 1.5,
"grad_norm": 1.429154502412058,
"learning_rate": 6.882097108668132e-05,
"loss": 1.0154,
"step": 175
},
{
"epoch": 1.55,
"grad_norm": 1.1785345522362023,
"learning_rate": 6.724231513139852e-05,
"loss": 1.0018,
"step": 180
},
{
"epoch": 1.59,
"grad_norm": 1.6392707933966606,
"learning_rate": 6.564390049765528e-05,
"loss": 0.9824,
"step": 185
},
{
"epoch": 1.63,
"grad_norm": 1.5381793424250634,
"learning_rate": 6.402755887455792e-05,
"loss": 0.9953,
"step": 190
},
{
"epoch": 1.67,
"grad_norm": 1.2412583778357746,
"learning_rate": 6.239514249448767e-05,
"loss": 0.9974,
"step": 195
},
{
"epoch": 1.72,
"grad_norm": 1.5172544424431558,
"learning_rate": 6.0748522010551215e-05,
"loss": 0.9531,
"step": 200
},
{
"epoch": 1.76,
"grad_norm": 1.3293697921192489,
"learning_rate": 5.908958435292241e-05,
"loss": 0.9842,
"step": 205
},
{
"epoch": 1.8,
"grad_norm": 1.4362543686797302,
"learning_rate": 5.742023056653131e-05,
"loss": 0.9993,
"step": 210
},
{
"epoch": 1.85,
"grad_norm": 1.3658639776213064,
"learning_rate": 5.574237363257858e-05,
"loss": 0.9936,
"step": 215
},
{
"epoch": 1.89,
"grad_norm": 1.583719578671925,
"learning_rate": 5.4057936276371565e-05,
"loss": 0.9686,
"step": 220
},
{
"epoch": 1.93,
"grad_norm": 1.2596980255346535,
"learning_rate": 5.236884876399429e-05,
"loss": 1.0095,
"step": 225
},
{
"epoch": 1.97,
"grad_norm": 1.3013094438190969,
"learning_rate": 5.0677046690336096e-05,
"loss": 0.999,
"step": 230
},
{
"epoch": 2.0,
"eval_loss": 1.0417044162750244,
"eval_runtime": 68.6388,
"eval_samples_per_second": 4.371,
"eval_steps_per_second": 0.554,
"step": 233
},
{
"epoch": 2.02,
"grad_norm": 1.2842579006806363,
"learning_rate": 4.898446876101379e-05,
"loss": 0.9169,
"step": 235
},
{
"epoch": 2.06,
"grad_norm": 1.3799527209106432,
"learning_rate": 4.729305457072913e-05,
"loss": 0.9044,
"step": 240
},
{
"epoch": 2.1,
"grad_norm": 1.332914263778945,
"learning_rate": 4.560474238060739e-05,
"loss": 0.8905,
"step": 245
},
{
"epoch": 2.15,
"grad_norm": 1.4483104158765132,
"learning_rate": 4.392146689706425e-05,
"loss": 0.895,
"step": 250
},
{
"epoch": 2.19,
"grad_norm": 1.2300017765319582,
"learning_rate": 4.224515705474603e-05,
"loss": 0.9144,
"step": 255
},
{
"epoch": 2.23,
"grad_norm": 1.5671642418549678,
"learning_rate": 4.057773380608411e-05,
"loss": 0.8692,
"step": 260
},
{
"epoch": 2.27,
"grad_norm": 1.1553793609487584,
"learning_rate": 3.892110791999649e-05,
"loss": 0.9092,
"step": 265
},
{
"epoch": 2.32,
"grad_norm": 1.3993702614119785,
"learning_rate": 3.7277177792259114e-05,
"loss": 0.8869,
"step": 270
},
{
"epoch": 2.36,
"grad_norm": 1.512174508677905,
"learning_rate": 3.5647827270055945e-05,
"loss": 0.9104,
"step": 275
},
{
"epoch": 2.4,
"grad_norm": 1.4126799043823064,
"learning_rate": 3.403492349320101e-05,
"loss": 0.8837,
"step": 280
},
{
"epoch": 2.45,
"grad_norm": 1.3750228704249976,
"learning_rate": 3.244031475450599e-05,
"loss": 0.9164,
"step": 285
},
{
"epoch": 2.49,
"grad_norm": 1.5268920387463116,
"learning_rate": 3.086582838174551e-05,
"loss": 0.8805,
"step": 290
},
{
"epoch": 2.53,
"grad_norm": 1.4206059480865851,
"learning_rate": 2.9313268643646986e-05,
"loss": 0.8942,
"step": 295
},
{
"epoch": 2.58,
"grad_norm": 1.4073831093700344,
"learning_rate": 2.7784414682304832e-05,
"loss": 0.9289,
"step": 300
},
{
"epoch": 2.62,
"grad_norm": 1.4564634651821706,
"learning_rate": 2.628101847438835e-05,
"loss": 0.8864,
"step": 305
},
{
"epoch": 2.66,
"grad_norm": 1.4217201527416836,
"learning_rate": 2.4804802823479613e-05,
"loss": 0.8947,
"step": 310
},
{
"epoch": 2.7,
"grad_norm": 1.5100802994235183,
"learning_rate": 2.3357459385841823e-05,
"loss": 0.8829,
"step": 315
},
{
"epoch": 2.75,
"grad_norm": 1.4578776901613366,
"learning_rate": 2.194064673188089e-05,
"loss": 0.922,
"step": 320
},
{
"epoch": 2.79,
"grad_norm": 1.3183332342703558,
"learning_rate": 2.055598844552129e-05,
"loss": 0.8785,
"step": 325
},
{
"epoch": 2.83,
"grad_norm": 1.3169540028278115,
"learning_rate": 1.920507126367448e-05,
"loss": 0.8853,
"step": 330
},
{
"epoch": 2.88,
"grad_norm": 1.4997550219337101,
"learning_rate": 1.7889443257931737e-05,
"loss": 0.8932,
"step": 335
},
{
"epoch": 2.92,
"grad_norm": 1.2868613064278502,
"learning_rate": 1.6610612060565234e-05,
"loss": 0.9047,
"step": 340
},
{
"epoch": 2.96,
"grad_norm": 1.3566421396199244,
"learning_rate": 1.5370043136870148e-05,
"loss": 0.9106,
"step": 345
},
{
"epoch": 3.0,
"eval_loss": 1.0241451263427734,
"eval_runtime": 68.638,
"eval_samples_per_second": 4.371,
"eval_steps_per_second": 0.554,
"step": 349
}
],
"logging_steps": 5,
"max_steps": 464,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"total_flos": 2581092118822912.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
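
Note: the "log_history" array above interleaves two kinds of records: per-logging-step training entries (keyed by "loss", emitted every 5 steps per "logging_steps") and end-of-epoch evaluation entries (keyed by "eval_loss"). A minimal sketch, assuming the file is saved locally as trainer_state.json and that matplotlib is available, of how the two series could be separated and plotted:

# Hypothetical reading aid, not part of the trainer output itself.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:          # assumed local path
    state = json.load(f)

# Split the mixed log into training-loss and eval-loss records.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], "o--", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()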