{
"best_metric": 0.3528364896774292,
"best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-2500",
"epoch": 4.18848167539267,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08376963350785341,
"grad_norm": 2.9895308017730713,
"learning_rate": 2.4500000000000003e-06,
"loss": 1.0423,
"step": 50
},
{
"epoch": 0.16753926701570682,
"grad_norm": 3.051593542098999,
"learning_rate": 4.950000000000001e-06,
"loss": 0.8473,
"step": 100
},
{
"epoch": 0.2513089005235602,
"grad_norm": 2.0044381618499756,
"learning_rate": 7.45e-06,
"loss": 0.733,
"step": 150
},
{
"epoch": 0.33507853403141363,
"grad_norm": 3.4974701404571533,
"learning_rate": 9.950000000000001e-06,
"loss": 0.6511,
"step": 200
},
{
"epoch": 0.418848167539267,
"grad_norm": 1.854073405265808,
"learning_rate": 1.2450000000000001e-05,
"loss": 0.6143,
"step": 250
},
{
"epoch": 0.5026178010471204,
"grad_norm": 1.737787127494812,
"learning_rate": 1.4950000000000001e-05,
"loss": 0.5909,
"step": 300
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.0971367359161377,
"learning_rate": 1.745e-05,
"loss": 0.5684,
"step": 350
},
{
"epoch": 0.6701570680628273,
"grad_norm": 1.8380221128463745,
"learning_rate": 1.995e-05,
"loss": 0.5472,
"step": 400
},
{
"epoch": 0.7539267015706806,
"grad_norm": 3.9271857738494873,
"learning_rate": 2.245e-05,
"loss": 0.5287,
"step": 450
},
{
"epoch": 0.837696335078534,
"grad_norm": 7.809891700744629,
"learning_rate": 2.495e-05,
"loss": 0.5174,
"step": 500
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4793977439403534,
"eval_runtime": 265.0789,
"eval_samples_per_second": 32.024,
"eval_steps_per_second": 4.006,
"step": 500
},
{
"epoch": 0.9214659685863874,
"grad_norm": 2.2309463024139404,
"learning_rate": 2.7450000000000003e-05,
"loss": 0.5084,
"step": 550
},
{
"epoch": 1.0052356020942408,
"grad_norm": 1.8079086542129517,
"learning_rate": 2.995e-05,
"loss": 0.4954,
"step": 600
},
{
"epoch": 1.0890052356020943,
"grad_norm": 6.239879608154297,
"learning_rate": 3.245e-05,
"loss": 0.4954,
"step": 650
},
{
"epoch": 1.1727748691099475,
"grad_norm": 12.593622207641602,
"learning_rate": 3.495e-05,
"loss": 0.4946,
"step": 700
},
{
"epoch": 1.256544502617801,
"grad_norm": 3.1568186283111572,
"learning_rate": 3.745e-05,
"loss": 0.4768,
"step": 750
},
{
"epoch": 1.3403141361256545,
"grad_norm": 3.9486255645751953,
"learning_rate": 3.995e-05,
"loss": 0.4737,
"step": 800
},
{
"epoch": 1.4240837696335078,
"grad_norm": 2.641502618789673,
"learning_rate": 4.245e-05,
"loss": 0.4706,
"step": 850
},
{
"epoch": 1.5078534031413613,
"grad_norm": 1.9798855781555176,
"learning_rate": 4.495e-05,
"loss": 0.4605,
"step": 900
},
{
"epoch": 1.5916230366492146,
"grad_norm": 2.691363573074341,
"learning_rate": 4.745e-05,
"loss": 0.4554,
"step": 950
},
{
"epoch": 1.675392670157068,
"grad_norm": 3.704902410507202,
"learning_rate": 4.995e-05,
"loss": 0.4561,
"step": 1000
},
{
"epoch": 1.675392670157068,
"eval_loss": 0.402164101600647,
"eval_runtime": 265.618,
"eval_samples_per_second": 31.959,
"eval_steps_per_second": 3.998,
"step": 1000
},
{
"epoch": 1.7591623036649215,
"grad_norm": 4.97900390625,
"learning_rate": 5.245e-05,
"loss": 0.4553,
"step": 1050
},
{
"epoch": 1.8429319371727748,
"grad_norm": 1.9889676570892334,
"learning_rate": 5.495e-05,
"loss": 0.449,
"step": 1100
},
{
"epoch": 1.9267015706806283,
"grad_norm": 1.5135546922683716,
"learning_rate": 5.745e-05,
"loss": 0.4353,
"step": 1150
},
{
"epoch": 2.0104712041884816,
"grad_norm": 7.610673904418945,
"learning_rate": 5.995000000000001e-05,
"loss": 0.4311,
"step": 1200
},
{
"epoch": 2.094240837696335,
"grad_norm": 2.049562454223633,
"learning_rate": 6.245000000000001e-05,
"loss": 0.4312,
"step": 1250
},
{
"epoch": 2.1780104712041886,
"grad_norm": 1.4102027416229248,
"learning_rate": 6.494999999999999e-05,
"loss": 0.4282,
"step": 1300
},
{
"epoch": 2.261780104712042,
"grad_norm": 1.701119065284729,
"learning_rate": 6.745e-05,
"loss": 0.4272,
"step": 1350
},
{
"epoch": 2.345549738219895,
"grad_norm": 2.0149667263031006,
"learning_rate": 6.995e-05,
"loss": 0.4277,
"step": 1400
},
{
"epoch": 2.4293193717277486,
"grad_norm": 2.1658883094787598,
"learning_rate": 7.245000000000001e-05,
"loss": 0.4247,
"step": 1450
},
{
"epoch": 2.513089005235602,
"grad_norm": 2.6821463108062744,
"learning_rate": 7.495e-05,
"loss": 0.4169,
"step": 1500
},
{
"epoch": 2.513089005235602,
"eval_loss": 0.3853827118873596,
"eval_runtime": 269.2607,
"eval_samples_per_second": 31.527,
"eval_steps_per_second": 3.944,
"step": 1500
},
{
"epoch": 2.5968586387434556,
"grad_norm": 1.4221985340118408,
"learning_rate": 7.745e-05,
"loss": 0.4145,
"step": 1550
},
{
"epoch": 2.680628272251309,
"grad_norm": 3.651655435562134,
"learning_rate": 7.995e-05,
"loss": 0.4129,
"step": 1600
},
{
"epoch": 2.7643979057591626,
"grad_norm": 1.2700576782226562,
"learning_rate": 8.245e-05,
"loss": 0.4106,
"step": 1650
},
{
"epoch": 2.8481675392670156,
"grad_norm": 1.8690059185028076,
"learning_rate": 8.495e-05,
"loss": 0.4134,
"step": 1700
},
{
"epoch": 2.931937172774869,
"grad_norm": 2.483203649520874,
"learning_rate": 8.745000000000001e-05,
"loss": 0.413,
"step": 1750
},
{
"epoch": 3.0157068062827226,
"grad_norm": 1.738501787185669,
"learning_rate": 8.995e-05,
"loss": 0.4063,
"step": 1800
},
{
"epoch": 3.099476439790576,
"grad_norm": 1.837342619895935,
"learning_rate": 9.245e-05,
"loss": 0.4049,
"step": 1850
},
{
"epoch": 3.183246073298429,
"grad_norm": 1.284974455833435,
"learning_rate": 9.495e-05,
"loss": 0.4065,
"step": 1900
},
{
"epoch": 3.2670157068062826,
"grad_norm": 2.637281656265259,
"learning_rate": 9.745000000000001e-05,
"loss": 0.3986,
"step": 1950
},
{
"epoch": 3.350785340314136,
"grad_norm": 1.4775941371917725,
"learning_rate": 9.995e-05,
"loss": 0.4054,
"step": 2000
},
{
"epoch": 3.350785340314136,
"eval_loss": 0.36490994691848755,
"eval_runtime": 265.9165,
"eval_samples_per_second": 31.924,
"eval_steps_per_second": 3.994,
"step": 2000
},
{
"epoch": 3.4345549738219896,
"grad_norm": 2.121384859085083,
"learning_rate": 9.918333333333334e-05,
"loss": 0.4058,
"step": 2050
},
{
"epoch": 3.518324607329843,
"grad_norm": 1.645984411239624,
"learning_rate": 9.835e-05,
"loss": 0.4021,
"step": 2100
},
{
"epoch": 3.6020942408376966,
"grad_norm": 1.246239185333252,
"learning_rate": 9.751666666666666e-05,
"loss": 0.3991,
"step": 2150
},
{
"epoch": 3.6858638743455496,
"grad_norm": 1.9096795320510864,
"learning_rate": 9.668333333333334e-05,
"loss": 0.3961,
"step": 2200
},
{
"epoch": 3.769633507853403,
"grad_norm": 1.8867601156234741,
"learning_rate": 9.585000000000001e-05,
"loss": 0.3904,
"step": 2250
},
{
"epoch": 3.8534031413612566,
"grad_norm": 1.7438101768493652,
"learning_rate": 9.501666666666668e-05,
"loss": 0.3895,
"step": 2300
},
{
"epoch": 3.93717277486911,
"grad_norm": 1.1799490451812744,
"learning_rate": 9.418333333333334e-05,
"loss": 0.4027,
"step": 2350
},
{
"epoch": 4.020942408376963,
"grad_norm": 1.1952763795852661,
"learning_rate": 9.335e-05,
"loss": 0.3893,
"step": 2400
},
{
"epoch": 4.104712041884817,
"grad_norm": 2.008756160736084,
"learning_rate": 9.251666666666667e-05,
"loss": 0.3878,
"step": 2450
},
{
"epoch": 4.18848167539267,
"grad_norm": 2.2693591117858887,
"learning_rate": 9.168333333333333e-05,
"loss": 0.3863,
"step": 2500
},
{
"epoch": 4.18848167539267,
"eval_loss": 0.3528364896774292,
"eval_runtime": 272.7627,
"eval_samples_per_second": 31.122,
"eval_steps_per_second": 3.893,
"step": 2500
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 14,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.477753664307475e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}