zlm_b64_le5_s8000 / checkpoint-1000 / trainer_state.json
Training in progress, step 1000, checkpoint
{
"best_metric": 0.49923568964004517,
"best_model_checkpoint": "mikhail-panzo/zlm_b64_le5_s8000/checkpoint-1000",
"epoch": 0.837696335078534,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.041884816753926704,
"grad_norm": 11.218673706054688,
"learning_rate": 2.4500000000000004e-07,
"loss": 1.1196,
"step": 50
},
{
"epoch": 0.08376963350785341,
"grad_norm": 5.423076629638672,
"learning_rate": 4.95e-07,
"loss": 1.057,
"step": 100
},
{
"epoch": 0.1256544502617801,
"grad_norm": 12.713382720947266,
"learning_rate": 7.4e-07,
"loss": 1.0037,
"step": 150
},
{
"epoch": 0.16753926701570682,
"grad_norm": 4.434875965118408,
"learning_rate": 9.9e-07,
"loss": 0.9381,
"step": 200
},
{
"epoch": 0.2094240837696335,
"grad_norm": 3.668553590774536,
"learning_rate": 1.2400000000000002e-06,
"loss": 0.8591,
"step": 250
},
{
"epoch": 0.2513089005235602,
"grad_norm": 2.2456483840942383,
"learning_rate": 1.4900000000000001e-06,
"loss": 0.8262,
"step": 300
},
{
"epoch": 0.2931937172774869,
"grad_norm": 2.70170259475708,
"learning_rate": 1.74e-06,
"loss": 0.7634,
"step": 350
},
{
"epoch": 0.33507853403141363,
"grad_norm": 2.9946064949035645,
"learning_rate": 1.9900000000000004e-06,
"loss": 0.7336,
"step": 400
},
{
"epoch": 0.3769633507853403,
"grad_norm": 2.2950398921966553,
"learning_rate": 2.24e-06,
"loss": 0.7121,
"step": 450
},
{
"epoch": 0.418848167539267,
"grad_norm": 8.007049560546875,
"learning_rate": 2.4900000000000003e-06,
"loss": 0.6879,
"step": 500
},
{
"epoch": 0.418848167539267,
"eval_loss": 0.5903987884521484,
"eval_runtime": 258.509,
"eval_samples_per_second": 32.838,
"eval_steps_per_second": 4.108,
"step": 500
},
{
"epoch": 0.4607329842931937,
"grad_norm": 3.0767672061920166,
"learning_rate": 2.7400000000000004e-06,
"loss": 0.6734,
"step": 550
},
{
"epoch": 0.5026178010471204,
"grad_norm": 8.150467872619629,
"learning_rate": 2.99e-06,
"loss": 0.6539,
"step": 600
},
{
"epoch": 0.5445026178010471,
"grad_norm": 2.2473220825195312,
"learning_rate": 3.2400000000000003e-06,
"loss": 0.657,
"step": 650
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.448143482208252,
"learning_rate": 3.49e-06,
"loss": 0.6258,
"step": 700
},
{
"epoch": 0.6282722513089005,
"grad_norm": 4.810636520385742,
"learning_rate": 3.74e-06,
"loss": 0.6155,
"step": 750
},
{
"epoch": 0.6701570680628273,
"grad_norm": 2.5927581787109375,
"learning_rate": 3.990000000000001e-06,
"loss": 0.6073,
"step": 800
},
{
"epoch": 0.7120418848167539,
"grad_norm": 1.913765549659729,
"learning_rate": 4.24e-06,
"loss": 0.6118,
"step": 850
},
{
"epoch": 0.7539267015706806,
"grad_norm": 2.210376262664795,
"learning_rate": 4.49e-06,
"loss": 0.5807,
"step": 900
},
{
"epoch": 0.7958115183246073,
"grad_norm": 3.323096752166748,
"learning_rate": 4.74e-06,
"loss": 0.5915,
"step": 950
},
{
"epoch": 0.837696335078534,
"grad_norm": 2.1455421447753906,
"learning_rate": 4.9900000000000005e-06,
"loss": 0.5606,
"step": 1000
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.49923568964004517,
"eval_runtime": 260.1008,
"eval_samples_per_second": 32.637,
"eval_steps_per_second": 4.083,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8963407491426432.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
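
For reference, a minimal Python sketch showing how the log_history entries in a trainer_state.json like the one above could be parsed with the standard json module to separate the training-loss and evaluation entries. The file path is an assumption; adjust it to the actual checkpoint directory.

```python
import json

# Minimal sketch (path is an assumption): load the Trainer state saved
# alongside this checkpoint and summarize the logged metrics.
with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (every logging_steps=50 steps, keyed
# by "loss") and evaluation entries (every eval_steps=500 steps, keyed by
# "eval_loss").
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss: ", state["best_metric"])
for e in eval_entries:
    print(f"step {e['step']:>5}  epoch {e['epoch']:.3f}  eval_loss {e['eval_loss']:.4f}")
```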