{
"best_metric": 0.41223829984664917,
"best_model_checkpoint": "mikhail-panzo/zlm-fil-ceb_b64_le5_s8000/checkpoint-1500",
"epoch": 108.69565217391305,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.1739130434782608,
"grad_norm": 0.7357823252677917,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.4055,
"step": 50
},
{
"epoch": 4.3478260869565215,
"grad_norm": 0.7944665551185608,
"learning_rate": 5.000000000000001e-07,
"loss": 0.3988,
"step": 100
},
{
"epoch": 6.521739130434782,
"grad_norm": 0.7226569652557373,
"learning_rate": 7.5e-07,
"loss": 0.4072,
"step": 150
},
{
"epoch": 8.695652173913043,
"grad_norm": 0.7845238447189331,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.4028,
"step": 200
},
{
"epoch": 10.869565217391305,
"grad_norm": 0.9769566655158997,
"learning_rate": 1.25e-06,
"loss": 0.4066,
"step": 250
},
{
"epoch": 13.043478260869565,
"grad_norm": 0.8634074330329895,
"learning_rate": 1.5e-06,
"loss": 0.405,
"step": 300
},
{
"epoch": 15.217391304347826,
"grad_norm": 0.9029327034950256,
"learning_rate": 1.75e-06,
"loss": 0.4074,
"step": 350
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.7624074220657349,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.406,
"step": 400
},
{
"epoch": 19.565217391304348,
"grad_norm": 1.005196213722229,
"learning_rate": 2.25e-06,
"loss": 0.4052,
"step": 450
},
{
"epoch": 21.73913043478261,
"grad_norm": 1.0276380777359009,
"learning_rate": 2.5e-06,
"loss": 0.4019,
"step": 500
},
{
"epoch": 21.73913043478261,
"eval_loss": 0.4143177568912506,
"eval_runtime": 6.4074,
"eval_samples_per_second": 24.815,
"eval_steps_per_second": 3.121,
"step": 500
},
{
"epoch": 23.91304347826087,
"grad_norm": 0.9067140817642212,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.4021,
"step": 550
},
{
"epoch": 26.08695652173913,
"grad_norm": 0.8799042701721191,
"learning_rate": 3e-06,
"loss": 0.4071,
"step": 600
},
{
"epoch": 28.26086956521739,
"grad_norm": 1.0451533794403076,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.4146,
"step": 650
},
{
"epoch": 30.434782608695652,
"grad_norm": 0.7998800277709961,
"learning_rate": 3.5e-06,
"loss": 0.4023,
"step": 700
},
{
"epoch": 32.608695652173914,
"grad_norm": 1.0301792621612549,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.4045,
"step": 750
},
{
"epoch": 34.78260869565217,
"grad_norm": 0.6388185620307922,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4045,
"step": 800
},
{
"epoch": 36.95652173913044,
"grad_norm": 0.9676840901374817,
"learning_rate": 4.25e-06,
"loss": 0.4116,
"step": 850
},
{
"epoch": 39.130434782608695,
"grad_norm": 0.8355514407157898,
"learning_rate": 4.5e-06,
"loss": 0.4013,
"step": 900
},
{
"epoch": 41.30434782608695,
"grad_norm": 0.7534067630767822,
"learning_rate": 4.75e-06,
"loss": 0.4019,
"step": 950
},
{
"epoch": 43.47826086956522,
"grad_norm": 0.9366117119789124,
"learning_rate": 5e-06,
"loss": 0.4064,
"step": 1000
},
{
"epoch": 43.47826086956522,
"eval_loss": 0.41316837072372437,
"eval_runtime": 6.5616,
"eval_samples_per_second": 24.232,
"eval_steps_per_second": 3.048,
"step": 1000
},
{
"epoch": 45.65217391304348,
"grad_norm": 0.7025179266929626,
"learning_rate": 5.2500000000000006e-06,
"loss": 0.4061,
"step": 1050
},
{
"epoch": 47.82608695652174,
"grad_norm": 0.9240646958351135,
"learning_rate": 5.500000000000001e-06,
"loss": 0.4061,
"step": 1100
},
{
"epoch": 50.0,
"grad_norm": 0.9685391187667847,
"learning_rate": 5.75e-06,
"loss": 0.4063,
"step": 1150
},
{
"epoch": 52.17391304347826,
"grad_norm": 0.8591094017028809,
"learning_rate": 6e-06,
"loss": 0.4013,
"step": 1200
},
{
"epoch": 54.34782608695652,
"grad_norm": 0.9348228573799133,
"learning_rate": 6.25e-06,
"loss": 0.402,
"step": 1250
},
{
"epoch": 56.52173913043478,
"grad_norm": 0.7908409237861633,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.4046,
"step": 1300
},
{
"epoch": 58.69565217391305,
"grad_norm": 1.109017014503479,
"learning_rate": 6.750000000000001e-06,
"loss": 0.3995,
"step": 1350
},
{
"epoch": 60.869565217391305,
"grad_norm": 0.8529478311538696,
"learning_rate": 7e-06,
"loss": 0.4018,
"step": 1400
},
{
"epoch": 63.04347826086956,
"grad_norm": 0.9947476387023926,
"learning_rate": 7.25e-06,
"loss": 0.401,
"step": 1450
},
{
"epoch": 65.21739130434783,
"grad_norm": 1.2476813793182373,
"learning_rate": 7.500000000000001e-06,
"loss": 0.4034,
"step": 1500
},
{
"epoch": 65.21739130434783,
"eval_loss": 0.41223829984664917,
"eval_runtime": 6.5846,
"eval_samples_per_second": 24.147,
"eval_steps_per_second": 3.037,
"step": 1500
},
{
"epoch": 67.3913043478261,
"grad_norm": 0.8658013343811035,
"learning_rate": 7.75e-06,
"loss": 0.4007,
"step": 1550
},
{
"epoch": 69.56521739130434,
"grad_norm": 0.7932788133621216,
"learning_rate": 8.000000000000001e-06,
"loss": 0.4054,
"step": 1600
},
{
"epoch": 71.73913043478261,
"grad_norm": 2.33347225189209,
"learning_rate": 8.25e-06,
"loss": 0.4075,
"step": 1650
},
{
"epoch": 73.91304347826087,
"grad_norm": 1.0095164775848389,
"learning_rate": 8.5e-06,
"loss": 0.3982,
"step": 1700
},
{
"epoch": 76.08695652173913,
"grad_norm": 0.8895902633666992,
"learning_rate": 8.750000000000001e-06,
"loss": 0.4018,
"step": 1750
},
{
"epoch": 78.26086956521739,
"grad_norm": 1.2252330780029297,
"learning_rate": 9e-06,
"loss": 0.4024,
"step": 1800
},
{
"epoch": 80.43478260869566,
"grad_norm": 0.7731898427009583,
"learning_rate": 9.250000000000001e-06,
"loss": 0.3994,
"step": 1850
},
{
"epoch": 82.6086956521739,
"grad_norm": 1.047607660293579,
"learning_rate": 9.5e-06,
"loss": 0.4013,
"step": 1900
},
{
"epoch": 84.78260869565217,
"grad_norm": 0.9284490346908569,
"learning_rate": 9.75e-06,
"loss": 0.3986,
"step": 1950
},
{
"epoch": 86.95652173913044,
"grad_norm": 4.725491046905518,
"learning_rate": 1e-05,
"loss": 0.3985,
"step": 2000
},
{
"epoch": 86.95652173913044,
"eval_loss": 0.4147759974002838,
"eval_runtime": 6.5475,
"eval_samples_per_second": 24.284,
"eval_steps_per_second": 3.055,
"step": 2000
},
{
"epoch": 89.1304347826087,
"grad_norm": 1.1678683757781982,
"learning_rate": 9.916666666666668e-06,
"loss": 0.4066,
"step": 2050
},
{
"epoch": 91.30434782608695,
"grad_norm": 1.988890528678894,
"learning_rate": 9.835000000000002e-06,
"loss": 0.3997,
"step": 2100
},
{
"epoch": 93.47826086956522,
"grad_norm": 1.0278427600860596,
"learning_rate": 9.751666666666667e-06,
"loss": 0.3948,
"step": 2150
},
{
"epoch": 95.65217391304348,
"grad_norm": 0.9606735110282898,
"learning_rate": 9.67e-06,
"loss": 0.42,
"step": 2200
},
{
"epoch": 97.82608695652173,
"grad_norm": 1.0861903429031372,
"learning_rate": 9.586666666666667e-06,
"loss": 0.398,
"step": 2250
},
{
"epoch": 100.0,
"grad_norm": 1.4872643947601318,
"learning_rate": 9.503333333333334e-06,
"loss": 0.4059,
"step": 2300
},
{
"epoch": 102.17391304347827,
"grad_norm": 1.0460981130599976,
"learning_rate": 9.42e-06,
"loss": 0.4014,
"step": 2350
},
{
"epoch": 104.34782608695652,
"grad_norm": 1.906476378440857,
"learning_rate": 9.336666666666666e-06,
"loss": 0.4015,
"step": 2400
},
{
"epoch": 106.52173913043478,
"grad_norm": 1.3965051174163818,
"learning_rate": 9.253333333333333e-06,
"loss": 0.4063,
"step": 2450
},
{
"epoch": 108.69565217391305,
"grad_norm": 1.7410107851028442,
"learning_rate": 9.17e-06,
"loss": 0.396,
"step": 2500
},
{
"epoch": 108.69565217391305,
"eval_loss": 0.41233959794044495,
"eval_runtime": 6.6031,
"eval_samples_per_second": 24.08,
"eval_steps_per_second": 3.029,
"step": 2500
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 348,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.699953465472e+16,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}