zlm_b64_le5_s12000 / checkpoint-1500 / trainer_state.json
Training in progress, step 1500, checkpoint
{
"best_metric": 0.4628298878669739,
"best_model_checkpoint": "mikhail_panzo/zlm_b64_le5_s12000/checkpoint-1500",
"epoch": 1.256544502617801,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.041884816753926704,
"grad_norm": 12.821951866149902,
"learning_rate": 2.4000000000000003e-07,
"loss": 1.1152,
"step": 50
},
{
"epoch": 0.08376963350785341,
"grad_norm": 6.186221122741699,
"learning_rate": 4.900000000000001e-07,
"loss": 1.0584,
"step": 100
},
{
"epoch": 0.1256544502617801,
"grad_norm": 13.08674430847168,
"learning_rate": 7.4e-07,
"loss": 0.9996,
"step": 150
},
{
"epoch": 0.16753926701570682,
"grad_norm": 4.10632848739624,
"learning_rate": 9.9e-07,
"loss": 0.9355,
"step": 200
},
{
"epoch": 0.2094240837696335,
"grad_norm": 3.2428064346313477,
"learning_rate": 1.2400000000000002e-06,
"loss": 0.852,
"step": 250
},
{
"epoch": 0.2513089005235602,
"grad_norm": 2.0861401557922363,
"learning_rate": 1.4900000000000001e-06,
"loss": 0.8241,
"step": 300
},
{
"epoch": 0.2931937172774869,
"grad_norm": 8.602317810058594,
"learning_rate": 1.74e-06,
"loss": 0.7629,
"step": 350
},
{
"epoch": 0.33507853403141363,
"grad_norm": 2.7594990730285645,
"learning_rate": 1.9900000000000004e-06,
"loss": 0.7418,
"step": 400
},
{
"epoch": 0.3769633507853403,
"grad_norm": 2.7047348022460938,
"learning_rate": 2.24e-06,
"loss": 0.7166,
"step": 450
},
{
"epoch": 0.418848167539267,
"grad_norm": 2.0666158199310303,
"learning_rate": 2.4900000000000003e-06,
"loss": 0.6876,
"step": 500
},
{
"epoch": 0.418848167539267,
"eval_loss": 0.5937426090240479,
"eval_runtime": 263.0955,
"eval_samples_per_second": 32.266,
"eval_steps_per_second": 4.037,
"step": 500
},
{
"epoch": 0.4607329842931937,
"grad_norm": 2.955193519592285,
"learning_rate": 2.7400000000000004e-06,
"loss": 0.673,
"step": 550
},
{
"epoch": 0.5026178010471204,
"grad_norm": 4.856322765350342,
"learning_rate": 2.99e-06,
"loss": 0.6539,
"step": 600
},
{
"epoch": 0.5445026178010471,
"grad_norm": 7.631103992462158,
"learning_rate": 3.2400000000000003e-06,
"loss": 0.6588,
"step": 650
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.715771436691284,
"learning_rate": 3.49e-06,
"loss": 0.624,
"step": 700
},
{
"epoch": 0.6282722513089005,
"grad_norm": 2.7684273719787598,
"learning_rate": 3.74e-06,
"loss": 0.6116,
"step": 750
},
{
"epoch": 0.6701570680628273,
"grad_norm": 2.219599723815918,
"learning_rate": 3.990000000000001e-06,
"loss": 0.6097,
"step": 800
},
{
"epoch": 0.7120418848167539,
"grad_norm": 3.749188184738159,
"learning_rate": 4.24e-06,
"loss": 0.614,
"step": 850
},
{
"epoch": 0.7539267015706806,
"grad_norm": 2.120619773864746,
"learning_rate": 4.49e-06,
"loss": 0.5814,
"step": 900
},
{
"epoch": 0.7958115183246073,
"grad_norm": 2.7725327014923096,
"learning_rate": 4.74e-06,
"loss": 0.5895,
"step": 950
},
{
"epoch": 0.837696335078534,
"grad_norm": 9.851961135864258,
"learning_rate": 4.9900000000000005e-06,
"loss": 0.5623,
"step": 1000
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4933945834636688,
"eval_runtime": 258.739,
"eval_samples_per_second": 32.809,
"eval_steps_per_second": 4.105,
"step": 1000
},
{
"epoch": 0.8795811518324608,
"grad_norm": 5.074089527130127,
"learning_rate": 5.240000000000001e-06,
"loss": 0.5748,
"step": 1050
},
{
"epoch": 0.9214659685863874,
"grad_norm": 5.884639739990234,
"learning_rate": 5.485e-06,
"loss": 0.5395,
"step": 1100
},
{
"epoch": 0.9633507853403142,
"grad_norm": 3.1588447093963623,
"learning_rate": 5.735e-06,
"loss": 0.5478,
"step": 1150
},
{
"epoch": 1.0052356020942408,
"grad_norm": 2.4452970027923584,
"learning_rate": 5.985000000000001e-06,
"loss": 0.538,
"step": 1200
},
{
"epoch": 1.0471204188481675,
"grad_norm": 8.290769577026367,
"learning_rate": 6.235000000000001e-06,
"loss": 0.5411,
"step": 1250
},
{
"epoch": 1.0890052356020943,
"grad_norm": 4.080046653747559,
"learning_rate": 6.485000000000001e-06,
"loss": 0.5387,
"step": 1300
},
{
"epoch": 1.130890052356021,
"grad_norm": 2.7520201206207275,
"learning_rate": 6.735000000000001e-06,
"loss": 0.5426,
"step": 1350
},
{
"epoch": 1.1727748691099475,
"grad_norm": 3.8192107677459717,
"learning_rate": 6.985000000000001e-06,
"loss": 0.5315,
"step": 1400
},
{
"epoch": 1.2146596858638743,
"grad_norm": 9.535676956176758,
"learning_rate": 7.235000000000001e-06,
"loss": 0.5159,
"step": 1450
},
{
"epoch": 1.256544502617801,
"grad_norm": 2.5731022357940674,
"learning_rate": 7.485000000000001e-06,
"loss": 0.5165,
"step": 1500
},
{
"epoch": 1.256544502617801,
"eval_loss": 0.4628298878669739,
"eval_runtime": 257.0265,
"eval_samples_per_second": 33.028,
"eval_steps_per_second": 4.132,
"step": 1500
}
],
"logging_steps": 50,
"max_steps": 12000,
"num_input_tokens_seen": 0,
"num_train_epochs": 11,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3505481451642752e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
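
This trainer_state.json is written by the Hugging Face Trainer alongside each checkpoint: log_history holds a training entry (loss, grad_norm, learning_rate) every logging_steps=50 steps and an evaluation entry (eval_loss and throughput) every eval_steps=500 steps, while best_metric/best_model_checkpoint track the lowest eval loss so far. A minimal sketch of how one might load and plot these curves is below; the checkpoint path is illustrative, and matplotlib is assumed to be installed.

    # Sketch: read a checkpoint's trainer_state.json and plot its loss curves.
    # "checkpoint-1500/trainer_state.json" is a placeholder path.
    import json

    import matplotlib.pyplot as plt

    with open("checkpoint-1500/trainer_state.json") as f:
        state = json.load(f)

    # log_history mixes training entries ("loss") and eval entries ("eval_loss").
    train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
    evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

    plt.plot(*zip(*train), label="train loss")
    plt.plot(*zip(*evals), marker="o", label="eval loss")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.title(f"best eval loss {state['best_metric']:.4f}")
    plt.show()

For this checkpoint the plot would show training loss falling from 1.1152 at step 50 to 0.5165 at step 1500, with eval loss improving from 0.5937 (step 500) to 0.4628 (step 1500), the current best_metric.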