{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 400.0,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 10.0,
      "learning_rate": 9.790476190476192e-06,
      "loss": 2.4087,
      "step": 50
    },
    {
      "epoch": 20.0,
      "learning_rate": 9.552380952380953e-06,
      "loss": 1.3645,
      "step": 100
    },
    {
      "epoch": 30.0,
      "learning_rate": 9.314285714285714e-06,
      "loss": 1.0984,
      "step": 150
    },
    {
      "epoch": 40.0,
      "learning_rate": 9.076190476190477e-06,
      "loss": 1.0497,
      "step": 200
    },
    {
      "epoch": 50.0,
      "learning_rate": 8.83809523809524e-06,
      "loss": 0.9478,
      "step": 250
    },
    {
      "epoch": 60.0,
      "learning_rate": 8.6e-06,
      "loss": 0.8743,
      "step": 300
    },
    {
      "epoch": 70.0,
      "learning_rate": 8.361904761904762e-06,
      "loss": 0.8098,
      "step": 350
    },
    {
      "epoch": 80.0,
      "learning_rate": 8.123809523809525e-06,
      "loss": 0.7879,
      "step": 400
    },
    {
      "epoch": 90.0,
      "learning_rate": 7.885714285714286e-06,
      "loss": 0.702,
      "step": 450
    },
    {
      "epoch": 100.0,
      "learning_rate": 7.647619047619049e-06,
      "loss": 0.6197,
      "step": 500
    },
    {
      "epoch": 110.0,
      "learning_rate": 7.40952380952381e-06,
      "loss": 0.5729,
      "step": 550
    },
    {
      "epoch": 120.0,
      "learning_rate": 7.1714285714285725e-06,
      "loss": 0.5353,
      "step": 600
    },
    {
      "epoch": 130.0,
      "learning_rate": 6.9333333333333344e-06,
      "loss": 0.4739,
      "step": 650
    },
    {
      "epoch": 140.0,
      "learning_rate": 6.6952380952380956e-06,
      "loss": 0.4399,
      "step": 700
    },
    {
      "epoch": 150.0,
      "learning_rate": 6.4571428571428575e-06,
      "loss": 0.3892,
      "step": 750
    },
    {
      "epoch": 160.0,
      "learning_rate": 6.21904761904762e-06,
      "loss": 0.3537,
      "step": 800
    },
    {
      "epoch": 170.0,
      "learning_rate": 5.9809523809523814e-06,
      "loss": 0.3312,
      "step": 850
    },
    {
      "epoch": 180.0,
      "learning_rate": 5.742857142857143e-06,
      "loss": 0.3184,
      "step": 900
    },
    {
      "epoch": 190.0,
      "learning_rate": 5.504761904761905e-06,
      "loss": 0.2993,
      "step": 950
    },
    {
      "epoch": 200.0,
      "learning_rate": 5.2666666666666665e-06,
      "loss": 0.2885,
      "step": 1000
    },
    {
      "epoch": 210.0,
      "learning_rate": 5.028571428571429e-06,
      "loss": 0.2745,
      "step": 1050
    },
    {
      "epoch": 220.0,
      "learning_rate": 4.790476190476191e-06,
      "loss": 0.2587,
      "step": 1100
    },
    {
      "epoch": 230.0,
      "learning_rate": 4.552380952380952e-06,
      "loss": 0.2492,
      "step": 1150
    },
    {
      "epoch": 240.0,
      "learning_rate": 4.314285714285714e-06,
      "loss": 0.2436,
      "step": 1200
    },
    {
      "epoch": 250.0,
      "learning_rate": 4.076190476190476e-06,
      "loss": 0.2322,
      "step": 1250
    },
    {
      "epoch": 260.0,
      "learning_rate": 3.838095238095238e-06,
      "loss": 0.234,
      "step": 1300
    },
    {
      "epoch": 270.0,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.2289,
      "step": 1350
    },
    {
      "epoch": 280.0,
      "learning_rate": 3.3619047619047622e-06,
      "loss": 0.2175,
      "step": 1400
    },
    {
      "epoch": 290.0,
      "learning_rate": 3.1238095238095238e-06,
      "loss": 0.2128,
      "step": 1450
    },
    {
      "epoch": 300.0,
      "learning_rate": 2.885714285714286e-06,
      "loss": 0.2083,
      "step": 1500
    },
    {
      "epoch": 310.0,
      "learning_rate": 2.6476190476190477e-06,
      "loss": 0.2037,
      "step": 1550
    },
    {
      "epoch": 320.0,
      "learning_rate": 2.4095238095238097e-06,
      "loss": 0.2018,
      "step": 1600
    },
    {
      "epoch": 330.0,
      "learning_rate": 2.1714285714285716e-06,
      "loss": 0.1987,
      "step": 1650
    },
    {
      "epoch": 340.0,
      "learning_rate": 1.9333333333333336e-06,
      "loss": 0.1941,
      "step": 1700
    },
    {
      "epoch": 350.0,
      "learning_rate": 1.6952380952380954e-06,
      "loss": 0.1941,
      "step": 1750
    },
    {
      "epoch": 360.0,
      "learning_rate": 1.4571428571428573e-06,
      "loss": 0.1941,
      "step": 1800
    },
    {
      "epoch": 370.0,
      "learning_rate": 1.219047619047619e-06,
      "loss": 0.1892,
      "step": 1850
    },
    {
      "epoch": 380.0,
      "learning_rate": 9.80952380952381e-07,
      "loss": 0.1856,
      "step": 1900
    },
    {
      "epoch": 390.0,
      "learning_rate": 7.428571428571429e-07,
      "loss": 0.183,
      "step": 1950
    },
    {
      "epoch": 400.0,
      "learning_rate": 5.047619047619048e-07,
      "loss": 0.187,
      "step": 2000
    }
  ],
  "max_steps": 2100,
  "num_train_epochs": 420,
  "total_flos": 7.455214301184e+18,
  "trial_name": null,
  "trial_params": null
}