{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 2103,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 4.88112220637185e-05,
      "loss": 2.1376,
      "step": 50
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.7622444127437e-05,
      "loss": 2.0469,
      "step": 100
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.64336661911555e-05,
      "loss": 1.9498,
      "step": 150
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.524488825487399e-05,
      "loss": 1.9426,
      "step": 200
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.405611031859249e-05,
      "loss": 1.9435,
      "step": 250
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.286733238231099e-05,
      "loss": 1.9045,
      "step": 300
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.167855444602948e-05,
      "loss": 1.906,
      "step": 350
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.048977650974798e-05,
      "loss": 1.8587,
      "step": 400
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.930099857346648e-05,
      "loss": 1.8745,
      "step": 450
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.811222063718498e-05,
      "loss": 1.8128,
      "step": 500
    },
    {
      "epoch": 0.78,
      "learning_rate": 3.692344270090347e-05,
      "loss": 1.8424,
      "step": 550
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.573466476462197e-05,
      "loss": 1.8223,
      "step": 600
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.454588682834047e-05,
      "loss": 1.7888,
      "step": 650
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.3357108892058964e-05,
      "loss": 1.9001,
      "step": 700
    },
    {
      "epoch": 1.07,
      "learning_rate": 3.216833095577747e-05,
      "loss": 1.8496,
      "step": 750
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.097955301949596e-05,
      "loss": 1.7976,
      "step": 800
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.9790775083214456e-05,
      "loss": 1.8336,
      "step": 850
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.8601997146932952e-05,
      "loss": 1.8062,
      "step": 900
    },
    {
      "epoch": 1.36,
      "learning_rate": 2.7413219210651452e-05,
      "loss": 1.7623,
      "step": 950
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.6224441274369948e-05,
      "loss": 1.7469,
      "step": 1000
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.5035663338088444e-05,
      "loss": 1.7603,
      "step": 1050
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.3846885401806944e-05,
      "loss": 1.784,
      "step": 1100
    },
    {
      "epoch": 1.64,
      "learning_rate": 2.265810746552544e-05,
      "loss": 1.7731,
      "step": 1150
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.146932952924394e-05,
      "loss": 1.8078,
      "step": 1200
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.0280551592962436e-05,
      "loss": 1.7908,
      "step": 1250
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.9091773656680932e-05,
      "loss": 1.8231,
      "step": 1300
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.790299572039943e-05,
      "loss": 1.7073,
      "step": 1350
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6714217784117928e-05,
      "loss": 1.7253,
      "step": 1400
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.5525439847836425e-05,
      "loss": 1.688,
      "step": 1450
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.433666191155492e-05,
      "loss": 1.7227,
      "step": 1500
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.314788397527342e-05,
      "loss": 1.713,
      "step": 1550
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.1959106038991917e-05,
      "loss": 1.6854,
      "step": 1600
    },
    {
      "epoch": 2.35,
      "learning_rate": 1.0770328102710415e-05,
      "loss": 1.6999,
      "step": 1650
    },
    {
      "epoch": 2.43,
      "learning_rate": 9.581550166428911e-06,
      "loss": 1.7437,
      "step": 1700
    },
    {
      "epoch": 2.5,
      "learning_rate": 8.392772230147409e-06,
      "loss": 1.7459,
      "step": 1750
    },
    {
      "epoch": 2.57,
      "learning_rate": 7.203994293865907e-06,
      "loss": 1.6915,
      "step": 1800
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.015216357584404e-06,
      "loss": 1.7785,
      "step": 1850
    },
    {
      "epoch": 2.71,
      "learning_rate": 4.826438421302901e-06,
      "loss": 1.7103,
      "step": 1900
    },
    {
      "epoch": 2.78,
      "learning_rate": 3.6376604850213982e-06,
      "loss": 1.7363,
      "step": 1950
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.4488825487398953e-06,
      "loss": 1.7628,
      "step": 2000
    },
    {
      "epoch": 2.92,
      "learning_rate": 1.2601046124583929e-06,
      "loss": 1.6434,
      "step": 2050
    },
    {
      "epoch": 3.0,
      "learning_rate": 7.132667617689017e-08,
      "loss": 1.6829,
      "step": 2100
    },
    {
      "epoch": 3.0,
      "step": 2103,
      "train_runtime": 922.3702,
      "train_samples_per_second": 2.28
    }
  ],
  "max_steps": 2103,
  "num_train_epochs": 3,
  "total_flos": 3889982746460160,
  "trial_name": null,
  "trial_params": null
}