{
  "best_metric": 0.8568872987477638,
  "best_model_checkpoint": "/scratch/mrahma45/pixel/finetuned_models/bert/bert-base-finetuned-pos-ud-Hindi-HDTB/checkpoint-3000",
  "epoch": 13.221153846153847,
  "global_step": 5500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 1.5512,
      "step": 100
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.966778523489933e-05,
      "loss": 0.7227,
      "step": 200
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.933221476510068e-05,
      "loss": 0.6046,
      "step": 300
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.8996644295302016e-05,
      "loss": 0.5466,
      "step": 400
    },
    {
      "epoch": 1.2,
      "learning_rate": 4.8661073825503355e-05,
      "loss": 0.5069,
      "step": 500
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.8260783144504075,
      "eval_loss": 0.4958227276802063,
      "eval_runtime": 8.4268,
      "eval_samples_per_second": 196.872,
      "eval_steps_per_second": 24.683,
      "step": 500
    },
    {
      "epoch": 1.44,
      "learning_rate": 4.83255033557047e-05,
      "loss": 0.4882,
      "step": 600
    },
    {
      "epoch": 1.68,
      "learning_rate": 4.798993288590604e-05,
      "loss": 0.4728,
      "step": 700
    },
    {
      "epoch": 1.92,
      "learning_rate": 4.765436241610739e-05,
      "loss": 0.4576,
      "step": 800
    },
    {
      "epoch": 2.16,
      "learning_rate": 4.7318791946308726e-05,
      "loss": 0.4339,
      "step": 900
    },
    {
      "epoch": 2.4,
      "learning_rate": 4.698322147651007e-05,
      "loss": 0.4185,
      "step": 1000
    },
    {
      "epoch": 2.4,
      "eval_accuracy": 0.8392821648635602,
      "eval_loss": 0.46815404295921326,
      "eval_runtime": 8.5929,
      "eval_samples_per_second": 193.067,
      "eval_steps_per_second": 24.206,
      "step": 1000
    },
    {
      "epoch": 2.64,
      "learning_rate": 4.664765100671141e-05,
      "loss": 0.4086,
      "step": 1100
    },
    {
      "epoch": 2.88,
      "learning_rate": 4.631208053691276e-05,
      "loss": 0.4081,
      "step": 1200
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.5976510067114097e-05,
      "loss": 0.3903,
      "step": 1300
    },
    {
      "epoch": 3.37,
      "learning_rate": 4.564093959731544e-05,
      "loss": 0.3658,
      "step": 1400
    },
    {
      "epoch": 3.61,
      "learning_rate": 4.5305369127516775e-05,
      "loss": 0.3701,
      "step": 1500
    },
    {
      "epoch": 3.61,
      "eval_accuracy": 0.8504699434931993,
      "eval_loss": 0.4506780803203583,
      "eval_runtime": 8.4658,
      "eval_samples_per_second": 195.965,
      "eval_steps_per_second": 24.569,
      "step": 1500
    },
    {
      "epoch": 3.85,
      "learning_rate": 4.496979865771812e-05,
      "loss": 0.3701,
      "step": 1600
    },
    {
      "epoch": 4.09,
      "learning_rate": 4.463422818791946e-05,
      "loss": 0.3607,
      "step": 1700
    },
    {
      "epoch": 4.33,
      "learning_rate": 4.4298657718120806e-05,
      "loss": 0.3278,
      "step": 1800
    },
    {
      "epoch": 4.57,
      "learning_rate": 4.3963087248322146e-05,
      "loss": 0.3374,
      "step": 1900
    },
    {
      "epoch": 4.81,
      "learning_rate": 4.362751677852349e-05,
      "loss": 0.3336,
      "step": 2000
    },
    {
      "epoch": 4.81,
      "eval_accuracy": 0.8520316892409916,
      "eval_loss": 0.44800490140914917,
      "eval_runtime": 8.4418,
      "eval_samples_per_second": 196.522,
      "eval_steps_per_second": 24.639,
      "step": 2000
    },
    {
      "epoch": 5.05,
      "learning_rate": 4.329194630872484e-05,
      "loss": 0.3316,
      "step": 2100
    },
    {
      "epoch": 5.29,
      "learning_rate": 4.295637583892618e-05,
      "loss": 0.2929,
      "step": 2200
    },
    {
      "epoch": 5.53,
      "learning_rate": 4.262080536912752e-05,
      "loss": 0.3,
      "step": 2300
    },
    {
      "epoch": 5.77,
      "learning_rate": 4.228523489932886e-05,
      "loss": 0.3029,
      "step": 2400
    },
    {
      "epoch": 6.01,
      "learning_rate": 4.194966442953021e-05,
      "loss": 0.3045,
      "step": 2500
    },
    {
      "epoch": 6.01,
      "eval_accuracy": 0.854814436209785,
      "eval_loss": 0.4508860409259796,
      "eval_runtime": 8.4218,
      "eval_samples_per_second": 196.99,
      "eval_steps_per_second": 24.698,
      "step": 2500
    },
    {
      "epoch": 6.25,
      "learning_rate": 4.161409395973155e-05,
      "loss": 0.265,
      "step": 2600
    },
    {
      "epoch": 6.49,
      "learning_rate": 4.127852348993289e-05,
      "loss": 0.2695,
      "step": 2700
    },
    {
      "epoch": 6.73,
      "learning_rate": 4.0942953020134226e-05,
      "loss": 0.2749,
      "step": 2800
    },
    {
      "epoch": 6.97,
      "learning_rate": 4.060738255033557e-05,
      "loss": 0.2735,
      "step": 2900
    },
    {
      "epoch": 7.21,
      "learning_rate": 4.027181208053691e-05,
      "loss": 0.2419,
      "step": 3000
    },
    {
      "epoch": 7.21,
      "eval_accuracy": 0.8568872987477638,
      "eval_loss": 0.4852503538131714,
      "eval_runtime": 8.4429,
      "eval_samples_per_second": 196.497,
      "eval_steps_per_second": 24.636,
      "step": 3000
    },
    {
      "epoch": 7.45,
      "learning_rate": 3.993624161073826e-05,
      "loss": 0.2386,
      "step": 3100
    },
    {
      "epoch": 7.69,
      "learning_rate": 3.96006711409396e-05,
      "loss": 0.241,
      "step": 3200
    },
    {
      "epoch": 7.93,
      "learning_rate": 3.926510067114094e-05,
      "loss": 0.2447,
      "step": 3300
    },
    {
      "epoch": 8.17,
      "learning_rate": 3.892953020134228e-05,
      "loss": 0.2193,
      "step": 3400
    },
    {
      "epoch": 8.41,
      "learning_rate": 3.859395973154363e-05,
      "loss": 0.2148,
      "step": 3500
    },
    {
      "epoch": 8.41,
      "eval_accuracy": 0.855439134508902,
      "eval_loss": 0.5063510537147522,
      "eval_runtime": 8.6104,
      "eval_samples_per_second": 192.673,
      "eval_steps_per_second": 24.157,
      "step": 3500
    },
    {
      "epoch": 8.65,
      "learning_rate": 3.825838926174497e-05,
      "loss": 0.2085,
      "step": 3600
    },
    {
      "epoch": 8.89,
      "learning_rate": 3.7922818791946313e-05,
      "loss": 0.2195,
      "step": 3700
    },
    {
      "epoch": 9.13,
      "learning_rate": 3.758724832214765e-05,
      "loss": 0.2027,
      "step": 3800
    },
    {
      "epoch": 9.38,
      "learning_rate": 3.725167785234899e-05,
      "loss": 0.183,
      "step": 3900
    },
    {
      "epoch": 9.62,
      "learning_rate": 3.691610738255034e-05,
      "loss": 0.1883,
      "step": 4000
    },
    {
      "epoch": 9.62,
      "eval_accuracy": 0.8551835761138087,
      "eval_loss": 0.5312172174453735,
      "eval_runtime": 8.4553,
      "eval_samples_per_second": 196.208,
      "eval_steps_per_second": 24.6,
      "step": 4000
    },
    {
      "epoch": 9.86,
      "learning_rate": 3.658053691275168e-05,
      "loss": 0.1903,
      "step": 4100
    },
    {
      "epoch": 10.1,
      "learning_rate": 3.624496644295302e-05,
      "loss": 0.1764,
      "step": 4200
    },
    {
      "epoch": 10.34,
      "learning_rate": 3.590939597315436e-05,
      "loss": 0.158,
      "step": 4300
    },
    {
      "epoch": 10.58,
      "learning_rate": 3.557382550335571e-05,
      "loss": 0.1643,
      "step": 4400
    },
    {
      "epoch": 10.82,
      "learning_rate": 3.523825503355705e-05,
      "loss": 0.1642,
      "step": 4500
    },
    {
      "epoch": 10.82,
      "eval_accuracy": 0.852003293863759,
      "eval_loss": 0.5592557787895203,
      "eval_runtime": 8.4747,
      "eval_samples_per_second": 195.759,
      "eval_steps_per_second": 24.544,
      "step": 4500
    },
    {
      "epoch": 11.06,
      "learning_rate": 3.4902684563758394e-05,
      "loss": 0.1645,
      "step": 4600
    },
    {
      "epoch": 11.3,
      "learning_rate": 3.456711409395973e-05,
      "loss": 0.1353,
      "step": 4700
    },
    {
      "epoch": 11.54,
      "learning_rate": 3.423154362416108e-05,
      "loss": 0.1462,
      "step": 4800
    },
    {
      "epoch": 11.78,
      "learning_rate": 3.389597315436242e-05,
      "loss": 0.1451,
      "step": 4900
    },
    {
      "epoch": 12.02,
      "learning_rate": 3.356040268456376e-05,
      "loss": 0.1488,
      "step": 5000
    },
    {
      "epoch": 12.02,
      "eval_accuracy": 0.8523156430133174,
      "eval_loss": 0.587023913860321,
      "eval_runtime": 8.4932,
      "eval_samples_per_second": 195.333,
      "eval_steps_per_second": 24.49,
      "step": 5000
    },
    {
      "epoch": 12.26,
      "learning_rate": 3.32248322147651e-05,
      "loss": 0.1163,
      "step": 5100
    },
    {
      "epoch": 12.5,
      "learning_rate": 3.288926174496644e-05,
      "loss": 0.1224,
      "step": 5200
    },
    {
      "epoch": 12.74,
      "learning_rate": 3.255369127516779e-05,
      "loss": 0.1281,
      "step": 5300
    },
    {
      "epoch": 12.98,
      "learning_rate": 3.221812080536913e-05,
      "loss": 0.1305,
      "step": 5400
    },
    {
      "epoch": 13.22,
      "learning_rate": 3.1882550335570474e-05,
      "loss": 0.1027,
      "step": 5500
    },
    {
      "epoch": 13.22,
      "eval_accuracy": 0.8522588522588522,
      "eval_loss": 0.6439880132675171,
      "eval_runtime": 8.5081,
      "eval_samples_per_second": 194.991,
      "eval_steps_per_second": 24.447,
      "step": 5500
    },
    {
      "epoch": 13.22,
      "step": 5500,
      "total_flos": 2.298364364453069e+16,
      "train_loss": 0.310650390625,
      "train_runtime": 1477.8296,
      "train_samples_per_second": 324.801,
      "train_steps_per_second": 10.15
    }
  ],
  "max_steps": 15000,
  "num_train_epochs": 37,
  "total_flos": 2.298364364453069e+16,
  "trial_name": null,
  "trial_params": null
}