|
{
  "best_metric": 0.741533273748008,
  "best_model_checkpoint": "trained/hebban-reviews/xlm-roberta-base/checkpoint-2000",
  "epoch": 4.382997370727432,
  "global_step": 5001,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.44,
      "learning_rate": 4.504099180163968e-05,
      "loss": 0.7622,
      "step": 500
    },
    {
      "epoch": 0.44,
      "eval_accuracy": 0.7567800788954635,
      "eval_f1": 0.7729405363796663,
      "eval_loss": 0.626587986946106,
      "eval_precision": 0.8122754990827801,
      "eval_qwk": 0.6618145848522559,
      "eval_recall": 0.7567800788954635,
      "eval_runtime": 23.2604,
      "eval_samples_per_second": 697.494,
      "eval_steps_per_second": 5.46,
      "step": 500
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.0041991601679665e-05,
      "loss": 0.6228,
      "step": 1000
    },
    {
      "epoch": 0.88,
      "eval_accuracy": 0.7375493096646942,
      "eval_f1": 0.7560139522475998,
      "eval_loss": 0.592847466468811,
      "eval_precision": 0.8006409272816242,
      "eval_qwk": 0.6464780542008888,
      "eval_recall": 0.7375493096646942,
      "eval_runtime": 23.1917,
      "eval_samples_per_second": 699.56,
      "eval_steps_per_second": 5.476,
      "step": 1000
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.504299140171966e-05,
      "loss": 0.5623,
      "step": 1500
    },
    {
      "epoch": 1.31,
      "eval_accuracy": 0.7543145956607495,
      "eval_f1": 0.7717090298131432,
      "eval_loss": 0.5968530178070068,
      "eval_precision": 0.8134758130284254,
      "eval_qwk": 0.6828220433375901,
      "eval_recall": 0.7543145956607495,
      "eval_runtime": 23.2366,
      "eval_samples_per_second": 698.21,
      "eval_steps_per_second": 5.466,
      "step": 1500
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.0043991201759648e-05,
      "loss": 0.5421,
      "step": 2000
    },
    {
      "epoch": 1.75,
      "eval_accuracy": 0.8150271203155819,
      "eval_f1": 0.8184520029418662,
      "eval_loss": 0.5808575749397278,
      "eval_precision": 0.8233166962202109,
      "eval_qwk": 0.741533273748008,
      "eval_recall": 0.8150271203155819,
      "eval_runtime": 23.1787,
      "eval_samples_per_second": 699.952,
      "eval_steps_per_second": 5.479,
      "step": 2000
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.504499100179964e-05,
      "loss": 0.5033,
      "step": 2500
    },
    {
      "epoch": 2.19,
      "eval_accuracy": 0.789447731755424,
      "eval_f1": 0.8012347601760753,
      "eval_loss": 0.640012800693512,
      "eval_precision": 0.8269193049518779,
      "eval_qwk": 0.7122705559860744,
      "eval_recall": 0.789447731755424,
      "eval_runtime": 23.1944,
      "eval_samples_per_second": 699.48,
      "eval_steps_per_second": 5.475,
      "step": 2500
    },
    {
      "epoch": 2.63,
      "learning_rate": 2.0055988802239554e-05,
      "loss": 0.4493,
      "step": 3000
    },
    {
      "epoch": 2.63,
      "eval_accuracy": 0.7778599605522682,
      "eval_f1": 0.7918891407245614,
      "eval_loss": 0.6219201683998108,
      "eval_precision": 0.8239935574770723,
      "eval_qwk": 0.7029409846056511,
      "eval_recall": 0.7778599605522682,
      "eval_runtime": 23.0915,
      "eval_samples_per_second": 702.598,
      "eval_steps_per_second": 5.5,
      "step": 3000
    },
    {
      "epoch": 3.07,
      "learning_rate": 1.5066986602679465e-05,
      "loss": 0.4391,
      "step": 3500
    },
    {
      "epoch": 3.07,
      "eval_accuracy": 0.784948224852071,
      "eval_f1": 0.7976824915929677,
      "eval_loss": 0.6451554298400879,
      "eval_precision": 0.8255295840982949,
      "eval_qwk": 0.7107576916204621,
      "eval_recall": 0.784948224852071,
      "eval_runtime": 23.2205,
      "eval_samples_per_second": 698.694,
      "eval_steps_per_second": 5.469,
      "step": 3500
    },
    {
      "epoch": 3.51,
      "learning_rate": 1.0067986402719456e-05,
      "loss": 0.3756,
      "step": 4000
    },
    {
      "epoch": 3.51,
      "eval_accuracy": 0.7970290927021696,
      "eval_f1": 0.8070579315874782,
      "eval_loss": 0.6958814263343811,
      "eval_precision": 0.8265215874136934,
      "eval_qwk": 0.7249078215037359,
      "eval_recall": 0.7970290927021696,
      "eval_runtime": 23.1336,
      "eval_samples_per_second": 701.319,
      "eval_steps_per_second": 5.49,
      "step": 4000
    },
    {
      "epoch": 3.94,
      "learning_rate": 5.068986202759449e-06,
      "loss": 0.3633,
      "step": 4500
    },
    {
      "epoch": 3.94,
      "eval_accuracy": 0.7962894477317555,
      "eval_f1": 0.8063568557789076,
      "eval_loss": 0.6968616247177124,
      "eval_precision": 0.8256437219303169,
      "eval_qwk": 0.7253147876480203,
      "eval_recall": 0.7962894477317555,
      "eval_runtime": 23.1633,
      "eval_samples_per_second": 700.418,
      "eval_steps_per_second": 5.483,
      "step": 4500
    },
    {
      "epoch": 4.38,
      "learning_rate": 6.998600279944012e-08,
      "loss": 0.3263,
      "step": 5000
    },
    {
      "epoch": 4.38,
      "eval_accuracy": 0.7972140039447732,
      "eval_f1": 0.8072907591794813,
      "eval_loss": 0.7634969353675842,
      "eval_precision": 0.826968890246297,
      "eval_qwk": 0.7254593503280649,
      "eval_recall": 0.7972140039447732,
      "eval_runtime": 23.3008,
      "eval_samples_per_second": 696.286,
      "eval_steps_per_second": 5.45,
      "step": 5000
    },
    {
      "epoch": 4.38,
      "step": 5001,
      "total_flos": 1.6838837781764506e+17,
      "train_loss": 0.4945967703420051,
      "train_runtime": 2688.9042,
      "train_samples_per_second": 238.063,
      "train_steps_per_second": 1.86
    }
  ],
  "max_steps": 5001,
  "num_train_epochs": 5,
  "total_flos": 1.6838837781764506e+17,
  "trial_name": null,
  "trial_params": null
}
|
|