{
"best_metric": 0.35975897312164307,
"best_model_checkpoint": "mikhail_panzo/zlm_b64_le4_s12000/checkpoint-2500",
"epoch": 2.094869109947644,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.041884816753926704,
"grad_norm": 2.697540283203125,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.0526,
"step": 50
},
{
"epoch": 0.08376963350785341,
"grad_norm": 4.0351996421813965,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.8484,
"step": 100
},
{
"epoch": 0.1256544502617801,
"grad_norm": 4.858840465545654,
"learning_rate": 7.4e-06,
"loss": 0.7505,
"step": 150
},
{
"epoch": 0.16753926701570682,
"grad_norm": 2.396157741546631,
"learning_rate": 9.900000000000002e-06,
"loss": 0.6889,
"step": 200
},
{
"epoch": 0.2094240837696335,
"grad_norm": 2.660891056060791,
"learning_rate": 1.24e-05,
"loss": 0.6363,
"step": 250
},
{
"epoch": 0.2513089005235602,
"grad_norm": 2.848365068435669,
"learning_rate": 1.49e-05,
"loss": 0.6107,
"step": 300
},
{
"epoch": 0.2931937172774869,
"grad_norm": 2.0031681060791016,
"learning_rate": 1.74e-05,
"loss": 0.5673,
"step": 350
},
{
"epoch": 0.33507853403141363,
"grad_norm": 4.771584510803223,
"learning_rate": 1.9900000000000003e-05,
"loss": 0.5566,
"step": 400
},
{
"epoch": 0.3769633507853403,
"grad_norm": 9.876974105834961,
"learning_rate": 2.2400000000000002e-05,
"loss": 0.5487,
"step": 450
},
{
"epoch": 0.418848167539267,
"grad_norm": 2.113546848297119,
"learning_rate": 2.4900000000000002e-05,
"loss": 0.5268,
"step": 500
},
{
"epoch": 0.418848167539267,
"eval_loss": 0.46557939052581787,
"eval_runtime": 297.9193,
"eval_samples_per_second": 28.494,
"eval_steps_per_second": 3.565,
"step": 500
},
{
"epoch": 0.4607329842931937,
"grad_norm": 2.7003655433654785,
"learning_rate": 2.7400000000000002e-05,
"loss": 0.5212,
"step": 550
},
{
"epoch": 0.5026178010471204,
"grad_norm": 5.649191379547119,
"learning_rate": 2.9900000000000002e-05,
"loss": 0.5138,
"step": 600
},
{
"epoch": 0.5445026178010471,
"grad_norm": 2.272181749343872,
"learning_rate": 3.24e-05,
"loss": 0.5121,
"step": 650
},
{
"epoch": 0.5863874345549738,
"grad_norm": 2.957671642303467,
"learning_rate": 3.49e-05,
"loss": 0.496,
"step": 700
},
{
"epoch": 0.6282722513089005,
"grad_norm": 3.577693462371826,
"learning_rate": 3.74e-05,
"loss": 0.4886,
"step": 750
},
{
"epoch": 0.6701570680628273,
"grad_norm": 2.466113328933716,
"learning_rate": 3.99e-05,
"loss": 0.4879,
"step": 800
},
{
"epoch": 0.7120418848167539,
"grad_norm": 2.941208600997925,
"learning_rate": 4.24e-05,
"loss": 0.4865,
"step": 850
},
{
"epoch": 0.7539267015706806,
"grad_norm": 2.3843142986297607,
"learning_rate": 4.49e-05,
"loss": 0.4689,
"step": 900
},
{
"epoch": 0.7958115183246073,
"grad_norm": 2.232208728790283,
"learning_rate": 4.74e-05,
"loss": 0.4768,
"step": 950
},
{
"epoch": 0.837696335078534,
"grad_norm": 2.1702969074249268,
"learning_rate": 4.99e-05,
"loss": 0.4583,
"step": 1000
},
{
"epoch": 0.837696335078534,
"eval_loss": 0.4044812321662903,
"eval_runtime": 296.3081,
"eval_samples_per_second": 28.649,
"eval_steps_per_second": 3.584,
"step": 1000
},
{
"epoch": 0.8795811518324608,
"grad_norm": 2.7613942623138428,
"learning_rate": 5.2400000000000007e-05,
"loss": 0.4664,
"step": 1050
},
{
"epoch": 0.9214659685863874,
"grad_norm": 2.4752321243286133,
"learning_rate": 5.4900000000000006e-05,
"loss": 0.442,
"step": 1100
},
{
"epoch": 0.9633507853403142,
"grad_norm": 2.27407169342041,
"learning_rate": 5.74e-05,
"loss": 0.4498,
"step": 1150
},
{
"epoch": 1.0052356020942408,
"grad_norm": 1.8998862504959106,
"learning_rate": 5.99e-05,
"loss": 0.4433,
"step": 1200
},
{
"epoch": 1.0471204188481675,
"grad_norm": 2.8479738235473633,
"learning_rate": 6.24e-05,
"loss": 0.4417,
"step": 1250
},
{
"epoch": 1.0890052356020943,
"grad_norm": 1.9095953702926636,
"learning_rate": 6.49e-05,
"loss": 0.444,
"step": 1300
},
{
"epoch": 1.130890052356021,
"grad_norm": 1.368077278137207,
"learning_rate": 6.740000000000001e-05,
"loss": 0.4519,
"step": 1350
},
{
"epoch": 1.1727748691099475,
"grad_norm": 2.0456721782684326,
"learning_rate": 6.99e-05,
"loss": 0.4411,
"step": 1400
},
{
"epoch": 1.2146596858638743,
"grad_norm": 1.6217619180679321,
"learning_rate": 7.24e-05,
"loss": 0.4324,
"step": 1450
},
{
"epoch": 1.256544502617801,
"grad_norm": 2.961646795272827,
"learning_rate": 7.49e-05,
"loss": 0.4321,
"step": 1500
},
{
"epoch": 1.256544502617801,
"eval_loss": 0.3930450975894928,
"eval_runtime": 301.1428,
"eval_samples_per_second": 28.189,
"eval_steps_per_second": 3.527,
"step": 1500
},
{
"epoch": 1.2984293193717278,
"grad_norm": 2.543201446533203,
"learning_rate": 7.740000000000001e-05,
"loss": 0.4364,
"step": 1550
},
{
"epoch": 1.3403141361256545,
"grad_norm": 1.5807560682296753,
"learning_rate": 7.99e-05,
"loss": 0.433,
"step": 1600
},
{
"epoch": 1.3821989528795813,
"grad_norm": 1.587385892868042,
"learning_rate": 8.24e-05,
"loss": 0.4285,
"step": 1650
},
{
"epoch": 1.4240837696335078,
"grad_norm": 2.1177687644958496,
"learning_rate": 8.49e-05,
"loss": 0.4209,
"step": 1700
},
{
"epoch": 1.4659685863874345,
"grad_norm": 1.6078702211380005,
"learning_rate": 8.740000000000001e-05,
"loss": 0.426,
"step": 1750
},
{
"epoch": 1.5078534031413613,
"grad_norm": 2.10492205619812,
"learning_rate": 8.99e-05,
"loss": 0.4191,
"step": 1800
},
{
"epoch": 1.5497382198952878,
"grad_norm": 2.169309616088867,
"learning_rate": 9.240000000000001e-05,
"loss": 0.4178,
"step": 1850
},
{
"epoch": 1.5916230366492146,
"grad_norm": 1.559006929397583,
"learning_rate": 9.49e-05,
"loss": 0.4133,
"step": 1900
},
{
"epoch": 1.6335078534031413,
"grad_norm": 2.0436346530914307,
"learning_rate": 9.74e-05,
"loss": 0.4234,
"step": 1950
},
{
"epoch": 1.675392670157068,
"grad_norm": 2.089486837387085,
"learning_rate": 9.99e-05,
"loss": 0.4092,
"step": 2000
},
{
"epoch": 1.675392670157068,
"eval_loss": 0.37686464190483093,
"eval_runtime": 310.3113,
"eval_samples_per_second": 27.356,
"eval_steps_per_second": 3.422,
"step": 2000
},
{
"epoch": 1.7179057591623037,
"grad_norm": 2.684563398361206,
"learning_rate": 9.952e-05,
"loss": 0.4099,
"step": 2050
},
{
"epoch": 1.7597905759162304,
"grad_norm": 1.7815254926681519,
"learning_rate": 9.902e-05,
"loss": 0.4093,
"step": 2100
},
{
"epoch": 1.8016753926701572,
"grad_norm": 1.9900892972946167,
"learning_rate": 9.852e-05,
"loss": 0.4116,
"step": 2150
},
{
"epoch": 1.8435602094240837,
"grad_norm": 1.8637460470199585,
"learning_rate": 9.802e-05,
"loss": 0.4089,
"step": 2200
},
{
"epoch": 1.8854450261780105,
"grad_norm": 1.58171546459198,
"learning_rate": 9.752e-05,
"loss": 0.4053,
"step": 2250
},
{
"epoch": 1.9273298429319372,
"grad_norm": 1.4995630979537964,
"learning_rate": 9.702e-05,
"loss": 0.4049,
"step": 2300
},
{
"epoch": 1.9692146596858637,
"grad_norm": 2.447049379348755,
"learning_rate": 9.652e-05,
"loss": 0.4029,
"step": 2350
},
{
"epoch": 2.0110994764397905,
"grad_norm": 1.8776074647903442,
"learning_rate": 9.602e-05,
"loss": 0.3988,
"step": 2400
},
{
"epoch": 2.0529842931937172,
"grad_norm": 3.1785056591033936,
"learning_rate": 9.552000000000001e-05,
"loss": 0.4023,
"step": 2450
},
{
"epoch": 2.094869109947644,
"grad_norm": 1.4416707754135132,
"learning_rate": 9.502000000000001e-05,
"loss": 0.4008,
"step": 2500
},
{
"epoch": 2.094869109947644,
"eval_loss": 0.35975897312164307,
"eval_runtime": 275.2855,
"eval_samples_per_second": 30.837,
"eval_steps_per_second": 3.858,
"step": 2500
}
],
"logging_steps": 50,
"max_steps": 12000,
"num_input_tokens_seen": 0,
"num_train_epochs": 11,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.245101969215635e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}