{
"best_metric": 0.384281188249588,
"best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s12000/checkpoint-1500",
"epoch": 2.513089005235602,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08376963350785341,
"grad_norm": 2.469545841217041,
"learning_rate": 2.4500000000000003e-06,
"loss": 1.0402,
"step": 50
},
{
"epoch": 0.16753926701570682,
"grad_norm": 3.3642780780792236,
"learning_rate": 4.950000000000001e-06,
"loss": 0.8472,
"step": 100
},
{
"epoch": 0.2513089005235602,
"grad_norm": 1.8375087976455688,
"learning_rate": 7.45e-06,
"loss": 0.7331,
"step": 150
},
{
"epoch": 0.33507853403141363,
"grad_norm": 3.700824737548828,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6472,
"step": 200
},
{
"epoch": 0.418848167539267,
"grad_norm": 1.7632888555526733,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.6112,
"step": 250
},
{
"epoch": 0.5026178010471204,
"grad_norm": 3.1861908435821533,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5865,
"step": 300
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.2845046520233154,
"learning_rate": 1.745e-05,
"loss": 0.5682,
"step": 350
},
{
"epoch": 0.6701570680628273,
"grad_norm": 2.079210042953491,
"learning_rate": 1.995e-05,
"loss": 0.5465,
"step": 400
},
{
"epoch": 0.7539267015706806,
"grad_norm": 3.669891119003296,
"learning_rate": 2.245e-05,
"loss": 0.5302,
"step": 450
},
{
"epoch": 0.837696335078534,
"grad_norm": 2.4679417610168457,
"learning_rate": 2.495e-05,
"loss": 0.5137,
"step": 500
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4513999819755554,
"eval_runtime": 277.1074,
"eval_samples_per_second": 30.634,
"eval_steps_per_second": 3.832,
"step": 500
},
{
"epoch": 0.9214659685863874,
"grad_norm": 2.703871726989746,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.5071,
"step": 550
},
{
"epoch": 1.0052356020942408,
"grad_norm": 3.8184635639190674,
"learning_rate": 2.995e-05,
"loss": 0.4971,
"step": 600
},
{
"epoch": 1.0890052356020943,
"grad_norm": 2.2857866287231445,
"learning_rate": 3.245e-05,
"loss": 0.4955,
"step": 650
},
{
"epoch": 1.1727748691099475,
"grad_norm": 3.5974085330963135,
"learning_rate": 3.495e-05,
"loss": 0.4935,
"step": 700
},
{
"epoch": 1.256544502617801,
"grad_norm": 1.720818281173706,
"learning_rate": 3.745e-05,
"loss": 0.4763,
"step": 750
},
{
"epoch": 1.3403141361256545,
"grad_norm": 3.080139636993408,
"learning_rate": 3.995e-05,
"loss": 0.4735,
"step": 800
},
{
"epoch": 1.4240837696335078,
"grad_norm": 4.877579212188721,
"learning_rate": 4.245e-05,
"loss": 0.4654,
"step": 850
},
{
"epoch": 1.5078534031413613,
"grad_norm": 3.383965253829956,
"learning_rate": 4.495e-05,
"loss": 0.4628,
"step": 900
},
{
"epoch": 1.5916230366492146,
"grad_norm": 3.3636982440948486,
"learning_rate": 4.745e-05,
"loss": 0.4541,
"step": 950
},
{
"epoch": 1.675392670157068,
"grad_norm": 1.666568398475647,
"learning_rate": 4.995e-05,
"loss": 0.4565,
"step": 1000
},
{
"epoch": 1.675392670157068,
"eval_loss": 0.4093586802482605,
"eval_runtime": 269.4005,
"eval_samples_per_second": 31.511,
"eval_steps_per_second": 3.942,
"step": 1000
},
{
"epoch": 1.7591623036649215,
"grad_norm": 1.6522510051727295,
"learning_rate": 5.245e-05,
"loss": 0.4541,
"step": 1050
},
{
"epoch": 1.8429319371727748,
"grad_norm": 1.6531606912612915,
"learning_rate": 5.495e-05,
"loss": 0.4448,
"step": 1100
},
{
"epoch": 1.9267015706806283,
"grad_norm": 3.3253750801086426,
"learning_rate": 5.745e-05,
"loss": 0.4346,
"step": 1150
},
{
"epoch": 2.0104712041884816,
"grad_norm": 2.0393073558807373,
"learning_rate": 5.995000000000001e-05,
"loss": 0.4314,
"step": 1200
},
{
"epoch": 2.094240837696335,
"grad_norm": 1.906546950340271,
"learning_rate": 6.245000000000001e-05,
"loss": 0.4327,
"step": 1250
},
{
"epoch": 2.1780104712041886,
"grad_norm": 1.7925021648406982,
"learning_rate": 6.494999999999999e-05,
"loss": 0.4285,
"step": 1300
},
{
"epoch": 2.261780104712042,
"grad_norm": 2.5238988399505615,
"learning_rate": 6.745e-05,
"loss": 0.4251,
"step": 1350
},
{
"epoch": 2.345549738219895,
"grad_norm": 2.53450345993042,
"learning_rate": 6.995e-05,
"loss": 0.4284,
"step": 1400
},
{
"epoch": 2.4293193717277486,
"grad_norm": 1.9077616930007935,
"learning_rate": 7.245000000000001e-05,
"loss": 0.4244,
"step": 1450
},
{
"epoch": 2.513089005235602,
"grad_norm": 1.5720113515853882,
"learning_rate": 7.495e-05,
"loss": 0.4171,
"step": 1500
},
{
"epoch": 2.513089005235602,
"eval_loss": 0.384281188249588,
"eval_runtime": 274.8763,
"eval_samples_per_second": 30.883,
"eval_steps_per_second": 3.864,
"step": 1500
}
],
"logging_steps": 50,
"max_steps": 12000,
"num_input_tokens_seen": 0,
"num_train_epochs": 21,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.698169585564851e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}