{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.774538386783285,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "learning_rate": 1.9912109375000002e-05,
      "loss": 1.9656,
      "step": 20
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.98193359375e-05,
      "loss": 1.8445,
      "step": 40
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.97216796875e-05,
      "loss": 1.59,
      "step": 60
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.9624023437500002e-05,
      "loss": 1.6275,
      "step": 80
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.9526367187500002e-05,
      "loss": 1.473,
      "step": 100
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.9428710937500003e-05,
      "loss": 1.3701,
      "step": 120
    },
    {
      "epoch": 1.09,
      "learning_rate": 1.93310546875e-05,
      "loss": 1.3224,
      "step": 140
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.92333984375e-05,
      "loss": 1.1423,
      "step": 160
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.91357421875e-05,
      "loss": 1.1652,
      "step": 180
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.9038085937500002e-05,
      "loss": 1.1422,
      "step": 200
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.8940429687500002e-05,
      "loss": 1.002,
      "step": 220
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.8842773437500003e-05,
      "loss": 1.0779,
      "step": 240
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.87451171875e-05,
      "loss": 0.9887,
      "step": 260
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.86474609375e-05,
      "loss": 0.9543,
      "step": 280
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.85498046875e-05,
      "loss": 0.9371,
      "step": 300
    },
    {
      "epoch": 2.49,
      "learning_rate": 1.8452148437500002e-05,
      "loss": 0.8701,
      "step": 320
    },
    {
      "epoch": 2.64,
      "learning_rate": 1.8354492187500003e-05,
      "loss": 0.875,
      "step": 340
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.82568359375e-05,
      "loss": 0.7843,
      "step": 360
    },
    {
      "epoch": 2.95,
      "learning_rate": 1.81591796875e-05,
      "loss": 0.7945,
      "step": 380
    },
    {
      "epoch": 3.11,
      "learning_rate": 1.80615234375e-05,
      "loss": 0.799,
      "step": 400
    },
    {
      "epoch": 3.27,
      "learning_rate": 1.79638671875e-05,
      "loss": 0.7623,
      "step": 420
    },
    {
      "epoch": 3.42,
      "learning_rate": 1.7866210937500002e-05,
      "loss": 0.7263,
      "step": 440
    },
    {
      "epoch": 3.58,
      "learning_rate": 1.7768554687500003e-05,
      "loss": 0.7779,
      "step": 460
    },
    {
      "epoch": 3.73,
      "learning_rate": 1.76708984375e-05,
      "loss": 0.695,
      "step": 480
    },
    {
      "epoch": 3.89,
      "learning_rate": 1.75732421875e-05,
      "loss": 0.7344,
      "step": 500
    },
    {
      "epoch": 4.04,
      "learning_rate": 1.74755859375e-05,
      "loss": 0.695,
      "step": 520
    },
    {
      "epoch": 4.2,
      "learning_rate": 1.7377929687500002e-05,
      "loss": 0.6504,
      "step": 540
    },
    {
      "epoch": 4.35,
      "learning_rate": 1.7280273437500002e-05,
      "loss": 0.6447,
      "step": 560
    },
    {
      "epoch": 4.51,
      "learning_rate": 1.7182617187500003e-05,
      "loss": 0.6931,
      "step": 580
    },
    {
      "epoch": 4.66,
      "learning_rate": 1.70849609375e-05,
      "loss": 0.6256,
      "step": 600
    },
    {
      "epoch": 4.82,
      "learning_rate": 1.69873046875e-05,
      "loss": 0.6132,
      "step": 620
    },
    {
      "epoch": 4.98,
      "learning_rate": 1.68896484375e-05,
      "loss": 0.6001,
      "step": 640
    },
    {
      "epoch": 5.13,
      "learning_rate": 1.6791992187500002e-05,
      "loss": 0.6176,
      "step": 660
    },
    {
      "epoch": 5.29,
      "learning_rate": 1.6694335937500002e-05,
      "loss": 0.5709,
      "step": 680
    },
    {
      "epoch": 5.44,
      "learning_rate": 1.65966796875e-05,
      "loss": 0.564,
      "step": 700
    },
    {
      "epoch": 5.6,
      "learning_rate": 1.64990234375e-05,
      "loss": 0.5969,
      "step": 720
    },
    {
      "epoch": 5.75,
      "learning_rate": 1.64013671875e-05,
      "loss": 0.5484,
      "step": 740
    },
    {
      "epoch": 5.91,
      "learning_rate": 1.63037109375e-05,
      "loss": 0.5667,
      "step": 760
    },
    {
      "epoch": 6.06,
      "learning_rate": 1.6206054687500002e-05,
      "loss": 0.5442,
      "step": 780
    },
    {
      "epoch": 6.22,
      "learning_rate": 1.6108398437500003e-05,
      "loss": 0.4857,
      "step": 800
    },
    {
      "epoch": 6.38,
      "learning_rate": 1.60107421875e-05,
      "loss": 0.5225,
      "step": 820
    },
    {
      "epoch": 6.53,
      "learning_rate": 1.59130859375e-05,
      "loss": 0.5457,
      "step": 840
    },
    {
      "epoch": 6.69,
      "learning_rate": 1.58154296875e-05,
      "loss": 0.5315,
      "step": 860
    },
    {
      "epoch": 6.84,
      "learning_rate": 1.57177734375e-05,
      "loss": 0.5345,
      "step": 880
    },
    {
      "epoch": 7.0,
      "learning_rate": 1.5620117187500002e-05,
      "loss": 0.5169,
      "step": 900
    },
    {
      "epoch": 7.15,
      "learning_rate": 1.5522460937500003e-05,
      "loss": 0.5115,
      "step": 920
    },
    {
      "epoch": 7.31,
      "learning_rate": 1.54248046875e-05,
      "loss": 0.4913,
      "step": 940
    },
    {
      "epoch": 7.46,
      "learning_rate": 1.53271484375e-05,
      "loss": 0.4868,
      "step": 960
    },
    {
      "epoch": 7.62,
      "learning_rate": 1.5229492187500001e-05,
      "loss": 0.5226,
      "step": 980
    },
    {
      "epoch": 7.77,
      "learning_rate": 1.5131835937500002e-05,
      "loss": 0.4061,
      "step": 1000
    }
  ],
  "max_steps": 4096,
  "num_train_epochs": 32,
  "total_flos": 1.2995638935552e+18,
  "trial_name": null,
  "trial_params": null
}