fil_b64_le5_s8000/last-checkpoint/trainer_state.json
{
"best_metric": 0.4311259388923645,
"best_model_checkpoint": "mikhail-panzo/fil_b64_le5_s8000/checkpoint-1500",
"epoch": 65.21739130434783,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.1739130434782608,
"grad_norm": 4.140745639801025,
"learning_rate": 2.4500000000000004e-07,
"loss": 0.8272,
"step": 50
},
{
"epoch": 4.3478260869565215,
"grad_norm": 1.78262197971344,
"learning_rate": 4.95e-07,
"loss": 0.7463,
"step": 100
},
{
"epoch": 6.521739130434782,
"grad_norm": 4.001489639282227,
"learning_rate": 7.450000000000001e-07,
"loss": 0.7504,
"step": 150
},
{
"epoch": 8.695652173913043,
"grad_norm": 1.0013818740844727,
"learning_rate": 9.950000000000002e-07,
"loss": 0.7295,
"step": 200
},
{
"epoch": 10.869565217391305,
"grad_norm": 1.9014511108398438,
"learning_rate": 1.2450000000000002e-06,
"loss": 0.7182,
"step": 250
},
{
"epoch": 13.043478260869565,
"grad_norm": 13.358899116516113,
"learning_rate": 1.495e-06,
"loss": 0.7037,
"step": 300
},
{
"epoch": 15.217391304347826,
"grad_norm": 1.4039109945297241,
"learning_rate": 1.745e-06,
"loss": 0.6966,
"step": 350
},
{
"epoch": 17.391304347826086,
"grad_norm": 3.0226972103118896,
"learning_rate": 1.9950000000000004e-06,
"loss": 0.6702,
"step": 400
},
{
"epoch": 19.565217391304348,
"grad_norm": 1.716300129890442,
"learning_rate": 2.24e-06,
"loss": 0.6487,
"step": 450
},
{
"epoch": 21.73913043478261,
"grad_norm": 2.9351744651794434,
"learning_rate": 2.4900000000000003e-06,
"loss": 0.6071,
"step": 500
},
{
"epoch": 21.73913043478261,
"eval_loss": 0.521294116973877,
"eval_runtime": 6.6451,
"eval_samples_per_second": 23.927,
"eval_steps_per_second": 3.01,
"step": 500
},
{
"epoch": 23.91304347826087,
"grad_norm": 1.9992552995681763,
"learning_rate": 2.7400000000000004e-06,
"loss": 0.5759,
"step": 550
},
{
"epoch": 26.08695652173913,
"grad_norm": 1.2594470977783203,
"learning_rate": 2.99e-06,
"loss": 0.56,
"step": 600
},
{
"epoch": 28.26086956521739,
"grad_norm": 1.0602363348007202,
"learning_rate": 3.2400000000000003e-06,
"loss": 0.5709,
"step": 650
},
{
"epoch": 30.434782608695652,
"grad_norm": 1.116231083869934,
"learning_rate": 3.49e-06,
"loss": 0.5322,
"step": 700
},
{
"epoch": 32.608695652173914,
"grad_norm": 1.5203620195388794,
"learning_rate": 3.74e-06,
"loss": 0.5264,
"step": 750
},
{
"epoch": 34.78260869565217,
"grad_norm": 1.6139357089996338,
"learning_rate": 3.990000000000001e-06,
"loss": 0.5191,
"step": 800
},
{
"epoch": 36.95652173913044,
"grad_norm": 1.436273217201233,
"learning_rate": 4.24e-06,
"loss": 0.5272,
"step": 850
},
{
"epoch": 39.130434782608695,
"grad_norm": 1.757323145866394,
"learning_rate": 4.49e-06,
"loss": 0.5016,
"step": 900
},
{
"epoch": 41.30434782608695,
"grad_norm": 0.9879368543624878,
"learning_rate": 4.74e-06,
"loss": 0.4975,
"step": 950
},
{
"epoch": 43.47826086956522,
"grad_norm": 1.3402222394943237,
"learning_rate": 4.9900000000000005e-06,
"loss": 0.5126,
"step": 1000
},
{
"epoch": 43.47826086956522,
"eval_loss": 0.4506620466709137,
"eval_runtime": 6.4644,
"eval_samples_per_second": 24.596,
"eval_steps_per_second": 3.094,
"step": 1000
},
{
"epoch": 45.65217391304348,
"grad_norm": 1.9366440773010254,
"learning_rate": 5.240000000000001e-06,
"loss": 0.5043,
"step": 1050
},
{
"epoch": 47.82608695652174,
"grad_norm": 1.5358505249023438,
"learning_rate": 5.490000000000001e-06,
"loss": 0.5034,
"step": 1100
},
{
"epoch": 50.0,
"grad_norm": 1.6836323738098145,
"learning_rate": 5.74e-06,
"loss": 0.499,
"step": 1150
},
{
"epoch": 52.17391304347826,
"grad_norm": 1.5092076063156128,
"learning_rate": 5.99e-06,
"loss": 0.487,
"step": 1200
},
{
"epoch": 54.34782608695652,
"grad_norm": 1.0751235485076904,
"learning_rate": 6.24e-06,
"loss": 0.4871,
"step": 1250
},
{
"epoch": 56.52173913043478,
"grad_norm": 1.327528476715088,
"learning_rate": 6.4900000000000005e-06,
"loss": 0.4911,
"step": 1300
},
{
"epoch": 58.69565217391305,
"grad_norm": 1.7827174663543701,
"learning_rate": 6.740000000000001e-06,
"loss": 0.4772,
"step": 1350
},
{
"epoch": 60.869565217391305,
"grad_norm": 1.2707558870315552,
"learning_rate": 6.99e-06,
"loss": 0.4786,
"step": 1400
},
{
"epoch": 63.04347826086956,
"grad_norm": 1.0794321298599243,
"learning_rate": 7.24e-06,
"loss": 0.4741,
"step": 1450
},
{
"epoch": 65.21739130434783,
"grad_norm": 2.0180580615997314,
"learning_rate": 7.49e-06,
"loss": 0.4749,
"step": 1500
},
{
"epoch": 65.21739130434783,
"eval_loss": 0.4311259388923645,
"eval_runtime": 6.4494,
"eval_samples_per_second": 24.653,
"eval_steps_per_second": 3.101,
"step": 1500
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 348,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.226780270809741e+16,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
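
A minimal sketch of how this checkpoint state can be inspected, assuming the file above is saved locally as "trainer_state.json" (the filename and the print layout are illustrative; the keys match those shown in the JSON). Each log_history entry is either a training log (carries "loss", "grad_norm", "learning_rate") or an evaluation log (carries "eval_loss" and runtime fields); both carry "epoch" and "step".

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Best evaluation result recorded so far and the checkpoint it came from.
print(f"best eval_loss: {state['best_metric']:.4f} "
      f"at {state['best_model_checkpoint']}")

# Walk the log history, separating training logs from evaluation logs.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>5}  epoch {entry['epoch']:7.2f}  "
              f"eval_loss {entry['eval_loss']:.4f}")
    elif "loss" in entry:
        print(f"step {entry['step']:>5}  epoch {entry['epoch']:7.2f}  "
              f"train_loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")

# Sanity check on the schedule: global_step 1500 over epoch ~65.22 implies
# 23 optimizer steps per epoch, so max_steps = 8000 corresponds to
# ceil(8000 / 23) = 348 epochs, matching num_train_epochs above.
steps_per_epoch = round(state["global_step"] / state["epoch"])
print("steps per epoch:", steps_per_epoch)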