{
"best_metric": 0.4463141858577728,
"best_model_checkpoint": "mikhail-panzo/fil_b32_le3_s4000/checkpoint-500",
"epoch": 55.55555555555556,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.1111111111111112,
"grad_norm": 3.548590898513794,
"learning_rate": 2.5e-05,
"loss": 0.7354,
"step": 50
},
{
"epoch": 2.2222222222222223,
"grad_norm": 5.517350196838379,
"learning_rate": 4.9500000000000004e-05,
"loss": 0.5892,
"step": 100
},
{
"epoch": 3.3333333333333335,
"grad_norm": 4.143002033233643,
"learning_rate": 7.45e-05,
"loss": 0.532,
"step": 150
},
{
"epoch": 4.444444444444445,
"grad_norm": 6.414477825164795,
"learning_rate": 9.95e-05,
"loss": 0.5284,
"step": 200
},
{
"epoch": 5.555555555555555,
"grad_norm": 2.265143394470215,
"learning_rate": 0.0001245,
"loss": 0.517,
"step": 250
},
{
"epoch": 6.666666666666667,
"grad_norm": 4.954311847686768,
"learning_rate": 0.0001495,
"loss": 0.5295,
"step": 300
},
{
"epoch": 7.777777777777778,
"grad_norm": 2.5993571281433105,
"learning_rate": 0.00017449999999999999,
"loss": 0.517,
"step": 350
},
{
"epoch": 8.88888888888889,
"grad_norm": 3.27908992767334,
"learning_rate": 0.00019950000000000002,
"loss": 0.4939,
"step": 400
},
{
"epoch": 10.0,
"grad_norm": 4.22811222076416,
"learning_rate": 0.0002245,
"loss": 0.5043,
"step": 450
},
{
"epoch": 11.11111111111111,
"grad_norm": 2.7051336765289307,
"learning_rate": 0.0002495,
"loss": 0.5028,
"step": 500
},
{
"epoch": 11.11111111111111,
"eval_loss": 0.4463141858577728,
"eval_runtime": 16.8864,
"eval_samples_per_second": 9.416,
"eval_steps_per_second": 1.184,
"step": 500
},
{
"epoch": 12.222222222222221,
"grad_norm": 3.0811643600463867,
"learning_rate": 0.0002745,
"loss": 0.545,
"step": 550
},
{
"epoch": 13.333333333333334,
"grad_norm": 7.707492828369141,
"learning_rate": 0.0002995,
"loss": 0.4979,
"step": 600
},
{
"epoch": 14.444444444444445,
"grad_norm": 5.85659122467041,
"learning_rate": 0.00032450000000000003,
"loss": 0.4916,
"step": 650
},
{
"epoch": 15.555555555555555,
"grad_norm": 8.300439834594727,
"learning_rate": 0.0003495,
"loss": 0.5112,
"step": 700
},
{
"epoch": 16.666666666666668,
"grad_norm": 10.664773941040039,
"learning_rate": 0.0003745,
"loss": 0.5312,
"step": 750
},
{
"epoch": 17.77777777777778,
"grad_norm": 6.4097161293029785,
"learning_rate": 0.0003995,
"loss": 0.6087,
"step": 800
},
{
"epoch": 18.88888888888889,
"grad_norm": 5.435739994049072,
"learning_rate": 0.0004245,
"loss": 0.6127,
"step": 850
},
{
"epoch": 20.0,
"grad_norm": 7.4649834632873535,
"learning_rate": 0.00044950000000000003,
"loss": 0.6904,
"step": 900
},
{
"epoch": 21.11111111111111,
"grad_norm": 10.81910228729248,
"learning_rate": 0.0004745,
"loss": 0.6948,
"step": 950
},
{
"epoch": 22.22222222222222,
"grad_norm": 4.4550886154174805,
"learning_rate": 0.0004995,
"loss": 0.6348,
"step": 1000
},
{
"epoch": 22.22222222222222,
"eval_loss": 0.5990382432937622,
"eval_runtime": 9.0191,
"eval_samples_per_second": 17.629,
"eval_steps_per_second": 2.218,
"step": 1000
},
{
"epoch": 23.333333333333332,
"grad_norm": 3.101961374282837,
"learning_rate": 0.0005245,
"loss": 0.6609,
"step": 1050
},
{
"epoch": 24.444444444444443,
"grad_norm": 5.606886386871338,
"learning_rate": 0.0005495,
"loss": 0.6128,
"step": 1100
},
{
"epoch": 25.555555555555557,
"grad_norm": 3.7152516841888428,
"learning_rate": 0.0005745,
"loss": 0.6197,
"step": 1150
},
{
"epoch": 26.666666666666668,
"grad_norm": 2.9999840259552,
"learning_rate": 0.0005995000000000001,
"loss": 0.6346,
"step": 1200
},
{
"epoch": 27.77777777777778,
"grad_norm": 3.5235953330993652,
"learning_rate": 0.0006245000000000001,
"loss": 0.6246,
"step": 1250
},
{
"epoch": 28.88888888888889,
"grad_norm": 3.9665956497192383,
"learning_rate": 0.0006495,
"loss": 0.6143,
"step": 1300
},
{
"epoch": 30.0,
"grad_norm": 2.920466661453247,
"learning_rate": 0.000674,
"loss": 0.6245,
"step": 1350
},
{
"epoch": 31.11111111111111,
"grad_norm": 16.839616775512695,
"learning_rate": 0.000699,
"loss": 0.8179,
"step": 1400
},
{
"epoch": 32.22222222222222,
"grad_norm": 0.23306678235530853,
"learning_rate": 0.0007235000000000001,
"loss": 1.8511,
"step": 1450
},
{
"epoch": 33.333333333333336,
"grad_norm": 0.6476005911827087,
"learning_rate": 0.000748,
"loss": 1.7172,
"step": 1500
},
{
"epoch": 33.333333333333336,
"eval_loss": 1.566503882408142,
"eval_runtime": 8.82,
"eval_samples_per_second": 18.027,
"eval_steps_per_second": 2.268,
"step": 1500
},
{
"epoch": 34.44444444444444,
"grad_norm": 0.22506840527057648,
"learning_rate": 0.000773,
"loss": 1.6751,
"step": 1550
},
{
"epoch": 35.55555555555556,
"grad_norm": 0.2123740017414093,
"learning_rate": 0.0007980000000000001,
"loss": 1.585,
"step": 1600
},
{
"epoch": 36.666666666666664,
"grad_norm": 0.2724120318889618,
"learning_rate": 0.000823,
"loss": 1.6075,
"step": 1650
},
{
"epoch": 37.77777777777778,
"grad_norm": 0.1595434546470642,
"learning_rate": 0.000848,
"loss": 1.5939,
"step": 1700
},
{
"epoch": 38.888888888888886,
"grad_norm": 0.329246461391449,
"learning_rate": 0.000873,
"loss": 1.6055,
"step": 1750
},
{
"epoch": 40.0,
"grad_norm": 0.5109756588935852,
"learning_rate": 0.000898,
"loss": 1.5925,
"step": 1800
},
{
"epoch": 41.111111111111114,
"grad_norm": 0.13621735572814941,
"learning_rate": 0.0009230000000000001,
"loss": 1.5823,
"step": 1850
},
{
"epoch": 42.22222222222222,
"grad_norm": 0.13570882380008698,
"learning_rate": 0.000948,
"loss": 1.5806,
"step": 1900
},
{
"epoch": 43.333333333333336,
"grad_norm": 0.10358592867851257,
"learning_rate": 0.000973,
"loss": 1.5762,
"step": 1950
},
{
"epoch": 44.44444444444444,
"grad_norm": 0.2059299349784851,
"learning_rate": 0.000998,
"loss": 1.5863,
"step": 2000
},
{
"epoch": 44.44444444444444,
"eval_loss": 1.5431957244873047,
"eval_runtime": 8.5158,
"eval_samples_per_second": 18.671,
"eval_steps_per_second": 2.349,
"step": 2000
},
{
"epoch": 45.55555555555556,
"grad_norm": 0.1365118771791458,
"learning_rate": 0.000977,
"loss": 1.578,
"step": 2050
},
{
"epoch": 46.666666666666664,
"grad_norm": 0.482020765542984,
"learning_rate": 0.0009519999999999999,
"loss": 1.5794,
"step": 2100
},
{
"epoch": 47.77777777777778,
"grad_norm": 0.0648043230175972,
"learning_rate": 0.0009270000000000001,
"loss": 1.6363,
"step": 2150
},
{
"epoch": 48.888888888888886,
"grad_norm": 0.11886011809110641,
"learning_rate": 0.000902,
"loss": 1.5797,
"step": 2200
},
{
"epoch": 50.0,
"grad_norm": 0.35273927450180054,
"learning_rate": 0.0008770000000000001,
"loss": 1.5799,
"step": 2250
},
{
"epoch": 51.111111111111114,
"grad_norm": 0.3486323356628418,
"learning_rate": 0.000852,
"loss": 1.5786,
"step": 2300
},
{
"epoch": 52.22222222222222,
"grad_norm": 0.10198960453271866,
"learning_rate": 0.0008269999999999999,
"loss": 1.5792,
"step": 2350
},
{
"epoch": 53.333333333333336,
"grad_norm": 0.45619985461235046,
"learning_rate": 0.0008020000000000001,
"loss": 1.5791,
"step": 2400
},
{
"epoch": 54.44444444444444,
"grad_norm": 0.18695257604122162,
"learning_rate": 0.000777,
"loss": 1.589,
"step": 2450
},
{
"epoch": 55.55555555555556,
"grad_norm": 0.16741132736206055,
"learning_rate": 0.0007520000000000001,
"loss": 1.5854,
"step": 2500
},
{
"epoch": 55.55555555555556,
"eval_loss": 1.5435634851455688,
"eval_runtime": 8.0681,
"eval_samples_per_second": 19.707,
"eval_steps_per_second": 2.479,
"step": 2500
}
],
"logging_steps": 50,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 89,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.7201159877249216e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}