{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.995708154506438,
"eval_steps": 500,
"global_step": 349,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 5.055780862946804,
"learning_rate": 9.997135147120633e-05,
"loss": 2.3426,
"step": 5
},
{
"epoch": 0.09,
"grad_norm": 3.9099604179547867,
"learning_rate": 9.98854387143534e-05,
"loss": 1.9269,
"step": 10
},
{
"epoch": 0.13,
"grad_norm": 2.3917278619890237,
"learning_rate": 9.974236018040474e-05,
"loss": 1.6971,
"step": 15
},
{
"epoch": 0.17,
"grad_norm": 2.0300075071526407,
"learning_rate": 9.954227982894034e-05,
"loss": 1.5024,
"step": 20
},
{
"epoch": 0.21,
"grad_norm": 2.5503247326265703,
"learning_rate": 9.928542694026862e-05,
"loss": 1.4291,
"step": 25
},
{
"epoch": 0.26,
"grad_norm": 2.8860783306647813,
"learning_rate": 9.897209585268458e-05,
"loss": 1.3888,
"step": 30
},
{
"epoch": 0.3,
"grad_norm": 1.7598771943831562,
"learning_rate": 9.86026456251757e-05,
"loss": 1.3614,
"step": 35
},
{
"epoch": 0.34,
"grad_norm": 1.8472495254954506,
"learning_rate": 9.817749962596115e-05,
"loss": 1.3218,
"step": 40
},
{
"epoch": 0.39,
"grad_norm": 1.8685305808519006,
"learning_rate": 9.769714504733694e-05,
"loss": 1.3144,
"step": 45
},
{
"epoch": 0.43,
"grad_norm": 1.6934593149050314,
"learning_rate": 9.716213234738215e-05,
"loss": 1.3105,
"step": 50
},
{
"epoch": 0.47,
"grad_norm": 1.6536690261785199,
"learning_rate": 9.657307461916635e-05,
"loss": 1.287,
"step": 55
},
{
"epoch": 0.52,
"grad_norm": 1.8152365885852073,
"learning_rate": 9.59306468881811e-05,
"loss": 1.2877,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 1.6831619071424775,
"learning_rate": 9.52355853388003e-05,
"loss": 1.2504,
"step": 65
},
{
"epoch": 0.6,
"grad_norm": 1.8966578603527602,
"learning_rate": 9.448868647065642e-05,
"loss": 1.2782,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 1.9475911961026766,
"learning_rate": 9.369080618589864e-05,
"loss": 1.2517,
"step": 75
},
{
"epoch": 0.69,
"grad_norm": 1.7144643185356307,
"learning_rate": 9.284285880837946e-05,
"loss": 1.2442,
"step": 80
},
{
"epoch": 0.73,
"grad_norm": 1.7507182276034248,
"learning_rate": 9.194581603589328e-05,
"loss": 1.2174,
"step": 85
},
{
"epoch": 0.77,
"grad_norm": 1.782170598732355,
"learning_rate": 9.100070582666795e-05,
"loss": 1.2231,
"step": 90
},
{
"epoch": 0.82,
"grad_norm": 1.884782447980567,
"learning_rate": 9.000861122138517e-05,
"loss": 1.1901,
"step": 95
},
{
"epoch": 0.86,
"grad_norm": 1.9269085187568622,
"learning_rate": 8.897066910207958e-05,
"loss": 1.1959,
"step": 100
},
{
"epoch": 0.9,
"grad_norm": 2.0913092281668977,
"learning_rate": 8.788806888933881e-05,
"loss": 1.1953,
"step": 105
},
{
"epoch": 0.94,
"grad_norm": 1.6532109062340223,
"learning_rate": 8.676205117929752e-05,
"loss": 1.2135,
"step": 110
},
{
"epoch": 0.99,
"grad_norm": 1.9499528694077117,
"learning_rate": 8.559390632198723e-05,
"loss": 1.2141,
"step": 115
},
{
"epoch": 1.0,
"eval_loss": 1.2135138511657715,
"eval_runtime": 39.1621,
"eval_samples_per_second": 7.66,
"eval_steps_per_second": 0.97,
"step": 116
},
{
"epoch": 1.03,
"grad_norm": 1.6359783575613835,
"learning_rate": 8.438497294267117e-05,
"loss": 1.1555,
"step": 120
},
{
"epoch": 1.07,
"grad_norm": 1.9376629758093418,
"learning_rate": 8.313663640785839e-05,
"loss": 1.123,
"step": 125
},
{
"epoch": 1.12,
"grad_norm": 1.8078455578062511,
"learning_rate": 8.185032723775539e-05,
"loss": 1.1545,
"step": 130
},
{
"epoch": 1.16,
"grad_norm": 1.78298002573264,
"learning_rate": 8.052751946697403e-05,
"loss": 1.0917,
"step": 135
},
{
"epoch": 1.2,
"grad_norm": 1.8433442214623892,
"learning_rate": 7.916972895537471e-05,
"loss": 1.1507,
"step": 140
},
{
"epoch": 1.24,
"grad_norm": 1.7754964498370087,
"learning_rate": 7.777851165098012e-05,
"loss": 1.1461,
"step": 145
},
{
"epoch": 1.29,
"grad_norm": 1.9524999874543483,
"learning_rate": 7.635546180695038e-05,
"loss": 1.0643,
"step": 150
},
{
"epoch": 1.33,
"grad_norm": 1.7260817926145178,
"learning_rate": 7.490221015466279e-05,
"loss": 1.1075,
"step": 155
},
{
"epoch": 1.37,
"grad_norm": 2.253848011637847,
"learning_rate": 7.342042203498951e-05,
"loss": 1.1714,
"step": 160
},
{
"epoch": 1.42,
"grad_norm": 1.9774736597698745,
"learning_rate": 7.191179548991507e-05,
"loss": 1.1076,
"step": 165
},
{
"epoch": 1.46,
"grad_norm": 2.0714596334335673,
"learning_rate": 7.037805931668005e-05,
"loss": 1.1452,
"step": 170
},
{
"epoch": 1.5,
"grad_norm": 1.7678144581225992,
"learning_rate": 6.882097108668132e-05,
"loss": 1.1356,
"step": 175
},
{
"epoch": 1.55,
"grad_norm": 1.709740560594397,
"learning_rate": 6.724231513139852e-05,
"loss": 1.1126,
"step": 180
},
{
"epoch": 1.59,
"grad_norm": 1.7902700297237863,
"learning_rate": 6.564390049765528e-05,
"loss": 1.0937,
"step": 185
},
{
"epoch": 1.63,
"grad_norm": 1.8180035012469915,
"learning_rate": 6.402755887455792e-05,
"loss": 1.1122,
"step": 190
},
{
"epoch": 1.67,
"grad_norm": 1.8387833131421265,
"learning_rate": 6.239514249448767e-05,
"loss": 1.1156,
"step": 195
},
{
"epoch": 1.72,
"grad_norm": 1.7206104055323197,
"learning_rate": 6.0748522010551215e-05,
"loss": 1.0624,
"step": 200
},
{
"epoch": 1.76,
"grad_norm": 1.96126387756175,
"learning_rate": 5.908958435292241e-05,
"loss": 1.0896,
"step": 205
},
{
"epoch": 1.8,
"grad_norm": 1.7505678845455834,
"learning_rate": 5.742023056653131e-05,
"loss": 1.1093,
"step": 210
},
{
"epoch": 1.85,
"grad_norm": 1.88819291262154,
"learning_rate": 5.574237363257858e-05,
"loss": 1.1057,
"step": 215
},
{
"epoch": 1.89,
"grad_norm": 1.981696800102056,
"learning_rate": 5.4057936276371565e-05,
"loss": 1.0809,
"step": 220
},
{
"epoch": 1.93,
"grad_norm": 2.0184048752186734,
"learning_rate": 5.236884876399429e-05,
"loss": 1.1245,
"step": 225
},
{
"epoch": 1.97,
"grad_norm": 1.7913247269002772,
"learning_rate": 5.0677046690336096e-05,
"loss": 1.1107,
"step": 230
},
{
"epoch": 2.0,
"eval_loss": 1.147458553314209,
"eval_runtime": 38.5013,
"eval_samples_per_second": 7.792,
"eval_steps_per_second": 0.987,
"step": 233
},
{
"epoch": 2.02,
"grad_norm": 1.573903357064543,
"learning_rate": 4.898446876101379e-05,
"loss": 1.0289,
"step": 235
},
{
"epoch": 2.06,
"grad_norm": 1.886593037989885,
"learning_rate": 4.729305457072913e-05,
"loss": 1.0287,
"step": 240
},
{
"epoch": 2.1,
"grad_norm": 2.3850699585618855,
"learning_rate": 4.560474238060739e-05,
"loss": 1.0079,
"step": 245
},
{
"epoch": 2.15,
"grad_norm": 1.8897004088761564,
"learning_rate": 4.392146689706425e-05,
"loss": 1.0176,
"step": 250
},
{
"epoch": 2.19,
"grad_norm": 1.6637180057615024,
"learning_rate": 4.224515705474603e-05,
"loss": 1.0393,
"step": 255
},
{
"epoch": 2.23,
"grad_norm": 1.9082972238615958,
"learning_rate": 4.057773380608411e-05,
"loss": 0.9883,
"step": 260
},
{
"epoch": 2.27,
"grad_norm": 1.6248508379655304,
"learning_rate": 3.892110791999649e-05,
"loss": 1.0335,
"step": 265
},
{
"epoch": 2.32,
"grad_norm": 2.057742891690489,
"learning_rate": 3.7277177792259114e-05,
"loss": 1.0055,
"step": 270
},
{
"epoch": 2.36,
"grad_norm": 2.1082750754536725,
"learning_rate": 3.5647827270055945e-05,
"loss": 1.0363,
"step": 275
},
{
"epoch": 2.4,
"grad_norm": 1.954319411917034,
"learning_rate": 3.403492349320101e-05,
"loss": 1.0049,
"step": 280
},
{
"epoch": 2.45,
"grad_norm": 1.835816144902083,
"learning_rate": 3.244031475450599e-05,
"loss": 1.0382,
"step": 285
},
{
"epoch": 2.49,
"grad_norm": 1.905542574366058,
"learning_rate": 3.086582838174551e-05,
"loss": 1.0012,
"step": 290
},
{
"epoch": 2.53,
"grad_norm": 1.9640737297497406,
"learning_rate": 2.9313268643646986e-05,
"loss": 1.0183,
"step": 295
},
{
"epoch": 2.58,
"grad_norm": 1.8759801566914525,
"learning_rate": 2.7784414682304832e-05,
"loss": 1.0513,
"step": 300
},
{
"epoch": 2.62,
"grad_norm": 1.7782596177203436,
"learning_rate": 2.628101847438835e-05,
"loss": 1.0053,
"step": 305
},
{
"epoch": 2.66,
"grad_norm": 1.8683159360283232,
"learning_rate": 2.4804802823479613e-05,
"loss": 1.0159,
"step": 310
},
{
"epoch": 2.7,
"grad_norm": 2.234906406234719,
"learning_rate": 2.3357459385841823e-05,
"loss": 1.0041,
"step": 315
},
{
"epoch": 2.75,
"grad_norm": 1.8995547320155095,
"learning_rate": 2.194064673188089e-05,
"loss": 1.0403,
"step": 320
},
{
"epoch": 2.79,
"grad_norm": 1.8065108976199886,
"learning_rate": 2.055598844552129e-05,
"loss": 1.0004,
"step": 325
},
{
"epoch": 2.83,
"grad_norm": 1.7757725659621284,
"learning_rate": 1.920507126367448e-05,
"loss": 1.0075,
"step": 330
},
{
"epoch": 2.88,
"grad_norm": 1.7478001464098316,
"learning_rate": 1.7889443257931737e-05,
"loss": 1.0163,
"step": 335
},
{
"epoch": 2.92,
"grad_norm": 1.7453331826305865,
"learning_rate": 1.6610612060565234e-05,
"loss": 1.0246,
"step": 340
},
{
"epoch": 2.96,
"grad_norm": 2.015231393689711,
"learning_rate": 1.5370043136870148e-05,
"loss": 1.0339,
"step": 345
},
{
"epoch": 3.0,
"eval_loss": 1.123692274093628,
"eval_runtime": 38.4705,
"eval_samples_per_second": 7.798,
"eval_steps_per_second": 0.988,
"step": 349
}
],
"logging_steps": 5,
"max_steps": 464,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"total_flos": 1663515060338688.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}