{
"best_metric": 0.4139060080051422,
"best_model_checkpoint": "mikhail-panzo/fil_b32_le5_s4000/checkpoint-4000",
"epoch": 88.88888888888889,
"eval_steps": 500,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.1111111111111112,
"grad_norm": 4.398683071136475,
"learning_rate": 2.4500000000000004e-07,
"loss": 0.8108,
"step": 50
},
{
"epoch": 2.2222222222222223,
"grad_norm": 3.726436138153076,
"learning_rate": 4.95e-07,
"loss": 0.792,
"step": 100
},
{
"epoch": 3.3333333333333335,
"grad_norm": 2.0379676818847656,
"learning_rate": 7.450000000000001e-07,
"loss": 0.7413,
"step": 150
},
{
"epoch": 4.444444444444445,
"grad_norm": 2.0460364818573,
"learning_rate": 9.950000000000002e-07,
"loss": 0.7342,
"step": 200
},
{
"epoch": 5.555555555555555,
"grad_norm": 2.803539276123047,
"learning_rate": 1.2450000000000002e-06,
"loss": 0.7147,
"step": 250
},
{
"epoch": 6.666666666666667,
"grad_norm": 1.9818241596221924,
"learning_rate": 1.495e-06,
"loss": 0.7072,
"step": 300
},
{
"epoch": 7.777777777777778,
"grad_norm": 3.081399917602539,
"learning_rate": 1.745e-06,
"loss": 0.6843,
"step": 350
},
{
"epoch": 8.88888888888889,
"grad_norm": 2.4465785026550293,
"learning_rate": 1.9950000000000004e-06,
"loss": 0.657,
"step": 400
},
{
"epoch": 10.0,
"grad_norm": 2.929323434829712,
"learning_rate": 2.245e-06,
"loss": 0.6597,
"step": 450
},
{
"epoch": 11.11111111111111,
"grad_norm": 1.419311285018921,
"learning_rate": 2.4950000000000003e-06,
"loss": 0.6246,
"step": 500
},
{
"epoch": 11.11111111111111,
"eval_loss": 0.5375720262527466,
"eval_runtime": 9.2568,
"eval_samples_per_second": 17.177,
"eval_steps_per_second": 2.161,
"step": 500
},
{
"epoch": 12.222222222222221,
"grad_norm": 2.3438374996185303,
"learning_rate": 2.7450000000000004e-06,
"loss": 0.6018,
"step": 550
},
{
"epoch": 13.333333333333334,
"grad_norm": 1.7441222667694092,
"learning_rate": 2.995e-06,
"loss": 0.5547,
"step": 600
},
{
"epoch": 14.444444444444445,
"grad_norm": 1.5592021942138672,
"learning_rate": 3.2450000000000003e-06,
"loss": 0.5466,
"step": 650
},
{
"epoch": 15.555555555555555,
"grad_norm": 1.305444359779358,
"learning_rate": 3.495e-06,
"loss": 0.535,
"step": 700
},
{
"epoch": 16.666666666666668,
"grad_norm": 2.2067477703094482,
"learning_rate": 3.745e-06,
"loss": 0.5343,
"step": 750
},
{
"epoch": 17.77777777777778,
"grad_norm": 1.667480707168579,
"learning_rate": 3.995000000000001e-06,
"loss": 0.5296,
"step": 800
},
{
"epoch": 18.88888888888889,
"grad_norm": 1.5465797185897827,
"learning_rate": 4.245e-06,
"loss": 0.5198,
"step": 850
},
{
"epoch": 20.0,
"grad_norm": 2.760589361190796,
"learning_rate": 4.495e-06,
"loss": 0.5193,
"step": 900
},
{
"epoch": 21.11111111111111,
"grad_norm": 1.4570050239562988,
"learning_rate": 4.745e-06,
"loss": 0.5124,
"step": 950
},
{
"epoch": 22.22222222222222,
"grad_norm": 2.1914265155792236,
"learning_rate": 4.9950000000000005e-06,
"loss": 0.5034,
"step": 1000
},
{
"epoch": 22.22222222222222,
"eval_loss": 0.45661377906799316,
"eval_runtime": 9.2158,
"eval_samples_per_second": 17.253,
"eval_steps_per_second": 2.17,
"step": 1000
},
{
"epoch": 23.333333333333332,
"grad_norm": 1.941751480102539,
"learning_rate": 5.245e-06,
"loss": 0.5046,
"step": 1050
},
{
"epoch": 24.444444444444443,
"grad_norm": 1.9293475151062012,
"learning_rate": 5.495000000000001e-06,
"loss": 0.4948,
"step": 1100
},
{
"epoch": 25.555555555555557,
"grad_norm": 1.3278849124908447,
"learning_rate": 5.745000000000001e-06,
"loss": 0.4931,
"step": 1150
},
{
"epoch": 26.666666666666668,
"grad_norm": 1.1657841205596924,
"learning_rate": 5.995000000000001e-06,
"loss": 0.496,
"step": 1200
},
{
"epoch": 27.77777777777778,
"grad_norm": 1.8730145692825317,
"learning_rate": 6.245000000000001e-06,
"loss": 0.4939,
"step": 1250
},
{
"epoch": 28.88888888888889,
"grad_norm": 2.1156740188598633,
"learning_rate": 6.4950000000000005e-06,
"loss": 0.4908,
"step": 1300
},
{
"epoch": 30.0,
"grad_norm": 1.799806833267212,
"learning_rate": 6.745000000000001e-06,
"loss": 0.4875,
"step": 1350
},
{
"epoch": 31.11111111111111,
"grad_norm": 1.762253761291504,
"learning_rate": 6.995000000000001e-06,
"loss": 0.4836,
"step": 1400
},
{
"epoch": 32.22222222222222,
"grad_norm": 1.305191993713379,
"learning_rate": 7.245000000000001e-06,
"loss": 0.4811,
"step": 1450
},
{
"epoch": 33.333333333333336,
"grad_norm": 1.5870022773742676,
"learning_rate": 7.495000000000001e-06,
"loss": 0.4807,
"step": 1500
},
{
"epoch": 33.333333333333336,
"eval_loss": 0.43601250648498535,
"eval_runtime": 9.5435,
"eval_samples_per_second": 16.66,
"eval_steps_per_second": 2.096,
"step": 1500
},
{
"epoch": 34.44444444444444,
"grad_norm": 2.632877826690674,
"learning_rate": 7.745e-06,
"loss": 0.4782,
"step": 1550
},
{
"epoch": 35.55555555555556,
"grad_norm": 1.3705276250839233,
"learning_rate": 7.995e-06,
"loss": 0.48,
"step": 1600
},
{
"epoch": 36.666666666666664,
"grad_norm": 1.9232587814331055,
"learning_rate": 8.245000000000002e-06,
"loss": 0.47,
"step": 1650
},
{
"epoch": 37.77777777777778,
"grad_norm": 1.3885018825531006,
"learning_rate": 8.495e-06,
"loss": 0.4711,
"step": 1700
},
{
"epoch": 38.888888888888886,
"grad_norm": 2.4857094287872314,
"learning_rate": 8.745000000000002e-06,
"loss": 0.4721,
"step": 1750
},
{
"epoch": 40.0,
"grad_norm": 1.7662663459777832,
"learning_rate": 8.995000000000001e-06,
"loss": 0.4656,
"step": 1800
},
{
"epoch": 41.111111111111114,
"grad_norm": 1.906548261642456,
"learning_rate": 9.245e-06,
"loss": 0.463,
"step": 1850
},
{
"epoch": 42.22222222222222,
"grad_norm": 1.7204883098602295,
"learning_rate": 9.495000000000001e-06,
"loss": 0.4592,
"step": 1900
},
{
"epoch": 43.333333333333336,
"grad_norm": 1.3313167095184326,
"learning_rate": 9.745e-06,
"loss": 0.4581,
"step": 1950
},
{
"epoch": 44.44444444444444,
"grad_norm": 1.94256591796875,
"learning_rate": 9.995000000000002e-06,
"loss": 0.4582,
"step": 2000
},
{
"epoch": 44.44444444444444,
"eval_loss": 0.4244868755340576,
"eval_runtime": 9.1316,
"eval_samples_per_second": 17.412,
"eval_steps_per_second": 2.19,
"step": 2000
},
{
"epoch": 45.55555555555556,
"grad_norm": 2.019158124923706,
"learning_rate": 9.755e-06,
"loss": 0.4573,
"step": 2050
},
{
"epoch": 46.666666666666664,
"grad_norm": 1.7636100053787231,
"learning_rate": 9.505000000000001e-06,
"loss": 0.4642,
"step": 2100
},
{
"epoch": 47.77777777777778,
"grad_norm": 2.0878355503082275,
"learning_rate": 9.260000000000001e-06,
"loss": 0.454,
"step": 2150
},
{
"epoch": 48.888888888888886,
"grad_norm": 1.5207270383834839,
"learning_rate": 9.01e-06,
"loss": 0.4573,
"step": 2200
},
{
"epoch": 50.0,
"grad_norm": 2.592440605163574,
"learning_rate": 8.76e-06,
"loss": 0.4557,
"step": 2250
},
{
"epoch": 51.111111111111114,
"grad_norm": 4.970732688903809,
"learning_rate": 8.51e-06,
"loss": 0.4565,
"step": 2300
},
{
"epoch": 52.22222222222222,
"grad_norm": 1.2565674781799316,
"learning_rate": 8.26e-06,
"loss": 0.4492,
"step": 2350
},
{
"epoch": 53.333333333333336,
"grad_norm": 1.402048110961914,
"learning_rate": 8.010000000000001e-06,
"loss": 0.4477,
"step": 2400
},
{
"epoch": 54.44444444444444,
"grad_norm": 2.7496023178100586,
"learning_rate": 7.76e-06,
"loss": 0.4484,
"step": 2450
},
{
"epoch": 55.55555555555556,
"grad_norm": 2.6707046031951904,
"learning_rate": 7.510000000000001e-06,
"loss": 0.4541,
"step": 2500
},
{
"epoch": 55.55555555555556,
"eval_loss": 0.42134132981300354,
"eval_runtime": 9.5109,
"eval_samples_per_second": 16.718,
"eval_steps_per_second": 2.103,
"step": 2500
},
{
"epoch": 56.666666666666664,
"grad_norm": 1.2582786083221436,
"learning_rate": 7.260000000000001e-06,
"loss": 0.4497,
"step": 2550
},
{
"epoch": 57.77777777777778,
"grad_norm": 1.7701936960220337,
"learning_rate": 7.01e-06,
"loss": 0.443,
"step": 2600
},
{
"epoch": 58.888888888888886,
"grad_norm": 1.3177437782287598,
"learning_rate": 6.760000000000001e-06,
"loss": 0.4432,
"step": 2650
},
{
"epoch": 60.0,
"grad_norm": 1.4644196033477783,
"learning_rate": 6.51e-06,
"loss": 0.4617,
"step": 2700
},
{
"epoch": 61.111111111111114,
"grad_norm": 1.3179391622543335,
"learning_rate": 6.26e-06,
"loss": 0.4563,
"step": 2750
},
{
"epoch": 62.22222222222222,
"grad_norm": 1.5823533535003662,
"learning_rate": 6.01e-06,
"loss": 0.442,
"step": 2800
},
{
"epoch": 63.333333333333336,
"grad_norm": 1.7668250799179077,
"learning_rate": 5.76e-06,
"loss": 0.4421,
"step": 2850
},
{
"epoch": 64.44444444444444,
"grad_norm": 1.5832878351211548,
"learning_rate": 5.510000000000001e-06,
"loss": 0.4467,
"step": 2900
},
{
"epoch": 65.55555555555556,
"grad_norm": 1.7351211309432983,
"learning_rate": 5.2600000000000005e-06,
"loss": 0.4395,
"step": 2950
},
{
"epoch": 66.66666666666667,
"grad_norm": 1.2300982475280762,
"learning_rate": 5.01e-06,
"loss": 0.4409,
"step": 3000
},
{
"epoch": 66.66666666666667,
"eval_loss": 0.415843665599823,
"eval_runtime": 9.3247,
"eval_samples_per_second": 17.051,
"eval_steps_per_second": 2.145,
"step": 3000
},
{
"epoch": 67.77777777777777,
"grad_norm": 1.788053035736084,
"learning_rate": 4.76e-06,
"loss": 0.4393,
"step": 3050
},
{
"epoch": 68.88888888888889,
"grad_norm": 1.4726969003677368,
"learning_rate": 4.510000000000001e-06,
"loss": 0.4383,
"step": 3100
},
{
"epoch": 70.0,
"grad_norm": 2.2467355728149414,
"learning_rate": 4.26e-06,
"loss": 0.4406,
"step": 3150
},
{
"epoch": 71.11111111111111,
"grad_norm": 1.4086047410964966,
"learning_rate": 4.0100000000000006e-06,
"loss": 0.4373,
"step": 3200
},
{
"epoch": 72.22222222222223,
"grad_norm": 1.154167890548706,
"learning_rate": 3.7600000000000004e-06,
"loss": 0.4417,
"step": 3250
},
{
"epoch": 73.33333333333333,
"grad_norm": 1.7119176387786865,
"learning_rate": 3.5100000000000003e-06,
"loss": 0.4319,
"step": 3300
},
{
"epoch": 74.44444444444444,
"grad_norm": 1.240720510482788,
"learning_rate": 3.2600000000000006e-06,
"loss": 0.4386,
"step": 3350
},
{
"epoch": 75.55555555555556,
"grad_norm": 1.281227946281433,
"learning_rate": 3.01e-06,
"loss": 0.4404,
"step": 3400
},
{
"epoch": 76.66666666666667,
"grad_norm": 1.456945776939392,
"learning_rate": 2.7600000000000003e-06,
"loss": 0.434,
"step": 3450
},
{
"epoch": 77.77777777777777,
"grad_norm": 1.233961582183838,
"learning_rate": 2.51e-06,
"loss": 0.4469,
"step": 3500
},
{
"epoch": 77.77777777777777,
"eval_loss": 0.41530150175094604,
"eval_runtime": 9.2669,
"eval_samples_per_second": 17.158,
"eval_steps_per_second": 2.158,
"step": 3500
},
{
"epoch": 78.88888888888889,
"grad_norm": 1.3688515424728394,
"learning_rate": 2.2600000000000004e-06,
"loss": 0.438,
"step": 3550
},
{
"epoch": 80.0,
"grad_norm": 2.092521905899048,
"learning_rate": 2.0100000000000002e-06,
"loss": 0.4347,
"step": 3600
},
{
"epoch": 81.11111111111111,
"grad_norm": 1.285953164100647,
"learning_rate": 1.76e-06,
"loss": 0.4416,
"step": 3650
},
{
"epoch": 82.22222222222223,
"grad_norm": 1.9712281227111816,
"learning_rate": 1.5100000000000002e-06,
"loss": 0.4388,
"step": 3700
},
{
"epoch": 83.33333333333333,
"grad_norm": 1.383866786956787,
"learning_rate": 1.26e-06,
"loss": 0.435,
"step": 3750
},
{
"epoch": 84.44444444444444,
"grad_norm": 1.2552483081817627,
"learning_rate": 1.01e-06,
"loss": 0.4441,
"step": 3800
},
{
"epoch": 85.55555555555556,
"grad_norm": 1.152358055114746,
"learning_rate": 7.6e-07,
"loss": 0.439,
"step": 3850
},
{
"epoch": 86.66666666666667,
"grad_norm": 1.0879451036453247,
"learning_rate": 5.1e-07,
"loss": 0.4355,
"step": 3900
},
{
"epoch": 87.77777777777777,
"grad_norm": 1.6243548393249512,
"learning_rate": 2.6e-07,
"loss": 0.4377,
"step": 3950
},
{
"epoch": 88.88888888888889,
"grad_norm": 1.1442890167236328,
"learning_rate": 1e-08,
"loss": 0.4337,
"step": 4000
},
{
"epoch": 88.88888888888889,
"eval_loss": 0.4139060080051422,
"eval_runtime": 8.8769,
"eval_samples_per_second": 17.912,
"eval_steps_per_second": 2.253,
"step": 4000
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 89,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.752618142335248e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}