{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 105.0,
"eval_steps": 500,
"global_step": 16485,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.1847133757961785,
"grad_norm": 1.833742380142212,
"learning_rate": 1.9872611464968155e-05,
"loss": 0.3724,
"step": 500
},
{
"epoch": 6.369426751592357,
"grad_norm": 1.515284538269043,
"learning_rate": 1.9745222929936306e-05,
"loss": 0.0612,
"step": 1000
},
{
"epoch": 9.554140127388536,
"grad_norm": 1.5868924856185913,
"learning_rate": 1.961783439490446e-05,
"loss": 0.0493,
"step": 1500
},
{
"epoch": 12.738853503184714,
"grad_norm": 1.5748449563980103,
"learning_rate": 1.9490445859872614e-05,
"loss": 0.0446,
"step": 2000
},
{
"epoch": 15.923566878980893,
"grad_norm": 1.6215589046478271,
"learning_rate": 1.9363057324840767e-05,
"loss": 0.0405,
"step": 2500
},
{
"epoch": 19.10828025477707,
"grad_norm": 1.1221493482589722,
"learning_rate": 1.9235668789808918e-05,
"loss": 0.0366,
"step": 3000
},
{
"epoch": 22.29299363057325,
"grad_norm": 1.6376855373382568,
"learning_rate": 1.910828025477707e-05,
"loss": 0.0331,
"step": 3500
},
{
"epoch": 25.477707006369428,
"grad_norm": 1.000545859336853,
"learning_rate": 1.8980891719745225e-05,
"loss": 0.0293,
"step": 4000
},
{
"epoch": 28.662420382165607,
"grad_norm": 0.6820641756057739,
"learning_rate": 1.8853503184713376e-05,
"loss": 0.0256,
"step": 4500
},
{
"epoch": 31.847133757961785,
"grad_norm": 1.1707481145858765,
"learning_rate": 1.872611464968153e-05,
"loss": 0.0221,
"step": 5000
},
{
"epoch": 35.031847133757964,
"grad_norm": 1.4677773714065552,
"learning_rate": 1.8598726114649684e-05,
"loss": 0.0185,
"step": 5500
},
{
"epoch": 38.21656050955414,
"grad_norm": 1.6114100217819214,
"learning_rate": 1.8471337579617837e-05,
"loss": 0.0157,
"step": 6000
},
{
"epoch": 41.40127388535032,
"grad_norm": 1.0237997770309448,
"learning_rate": 1.8343949044585988e-05,
"loss": 0.013,
"step": 6500
},
{
"epoch": 44.5859872611465,
"grad_norm": 0.8055925965309143,
"learning_rate": 1.821656050955414e-05,
"loss": 0.0112,
"step": 7000
},
{
"epoch": 47.77070063694268,
"grad_norm": 0.8719159364700317,
"learning_rate": 1.8089171974522295e-05,
"loss": 0.0095,
"step": 7500
},
{
"epoch": 50.955414012738856,
"grad_norm": 1.6024932861328125,
"learning_rate": 1.796178343949045e-05,
"loss": 0.0083,
"step": 8000
},
{
"epoch": 54.140127388535035,
"grad_norm": 0.6275691986083984,
"learning_rate": 1.78343949044586e-05,
"loss": 0.0076,
"step": 8500
},
{
"epoch": 57.32484076433121,
"grad_norm": 0.7005597949028015,
"learning_rate": 1.7707006369426754e-05,
"loss": 0.0065,
"step": 9000
},
{
"epoch": 60.50955414012739,
"grad_norm": 0.30579155683517456,
"learning_rate": 1.7579617834394907e-05,
"loss": 0.0063,
"step": 9500
},
{
"epoch": 63.69426751592356,
"grad_norm": 0.4344225227832794,
"learning_rate": 1.7452229299363058e-05,
"loss": 0.0059,
"step": 10000
},
{
"epoch": 66.87898089171975,
"grad_norm": 1.7709016799926758,
"learning_rate": 1.732484076433121e-05,
"loss": 0.0053,
"step": 10500
},
{
"epoch": 70.06369426751593,
"grad_norm": 0.622224748134613,
"learning_rate": 1.7197452229299365e-05,
"loss": 0.005,
"step": 11000
},
{
"epoch": 73.2484076433121,
"grad_norm": 0.564902126789093,
"learning_rate": 1.707006369426752e-05,
"loss": 0.0049,
"step": 11500
},
{
"epoch": 76.43312101910828,
"grad_norm": 0.5126436352729797,
"learning_rate": 1.694267515923567e-05,
"loss": 0.0045,
"step": 12000
},
{
"epoch": 79.61783439490446,
"grad_norm": 1.7633012533187866,
"learning_rate": 1.6815286624203824e-05,
"loss": 0.0041,
"step": 12500
},
{
"epoch": 82.80254777070064,
"grad_norm": 0.7407333850860596,
"learning_rate": 1.6687898089171977e-05,
"loss": 0.004,
"step": 13000
},
{
"epoch": 85.98726114649682,
"grad_norm": 0.5011927485466003,
"learning_rate": 1.6560509554140128e-05,
"loss": 0.0043,
"step": 13500
},
{
"epoch": 89.171974522293,
"grad_norm": 0.5946338176727295,
"learning_rate": 1.643312101910828e-05,
"loss": 0.0039,
"step": 14000
},
{
"epoch": 92.35668789808918,
"grad_norm": 0.3282039761543274,
"learning_rate": 1.6305732484076436e-05,
"loss": 0.0035,
"step": 14500
},
{
"epoch": 95.54140127388536,
"grad_norm": 0.0594528391957283,
"learning_rate": 1.617834394904459e-05,
"loss": 0.0037,
"step": 15000
},
{
"epoch": 98.72611464968153,
"grad_norm": 0.032949354499578476,
"learning_rate": 1.605095541401274e-05,
"loss": 0.0034,
"step": 15500
},
{
"epoch": 101.91082802547771,
"grad_norm": 1.0133298635482788,
"learning_rate": 1.5923566878980894e-05,
"loss": 0.0035,
"step": 16000
}
],
"logging_steps": 500,
"max_steps": 78500,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 500,
"total_flos": 2594375342751744.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}