{
  "best_metric": 0.8130081300813008,
  "best_model_checkpoint": "distilbert-base-multilingual-cased-hyper-matt/run-vrziupab/checkpoint-100",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4,
      "grad_norm": 1.6790717840194702,
      "learning_rate": 3.3566715164263906e-05,
      "loss": 0.5543,
      "step": 10
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.9814144372940063,
      "learning_rate": 2.983708014601236e-05,
      "loss": 0.4322,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.85,
      "eval_f1": 0.7744360902255639,
      "eval_loss": 0.3429238200187683,
      "eval_precision": 0.7202797202797203,
      "eval_recall": 0.8373983739837398,
      "eval_runtime": 1.5447,
      "eval_samples_per_second": 258.95,
      "eval_steps_per_second": 16.184,
      "step": 25
    },
    {
      "epoch": 1.2,
      "grad_norm": 5.034728050231934,
      "learning_rate": 2.6107445127760812e-05,
      "loss": 0.3387,
      "step": 30
    },
    {
      "epoch": 1.6,
      "grad_norm": 5.807296276092529,
      "learning_rate": 2.237781010950927e-05,
      "loss": 0.3155,
      "step": 40
    },
    {
      "epoch": 2.0,
      "grad_norm": 5.17315673828125,
      "learning_rate": 1.8648175091257724e-05,
      "loss": 0.2955,
      "step": 50
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.865,
      "eval_f1": 0.7969924812030075,
      "eval_loss": 0.3180018663406372,
      "eval_precision": 0.7412587412587412,
      "eval_recall": 0.8617886178861789,
      "eval_runtime": 1.515,
      "eval_samples_per_second": 264.023,
      "eval_steps_per_second": 16.501,
      "step": 50
    },
    {
      "epoch": 2.4,
      "grad_norm": 3.6483638286590576,
      "learning_rate": 1.491854007300618e-05,
      "loss": 0.2305,
      "step": 60
    },
    {
      "epoch": 2.8,
      "grad_norm": 1.6533458232879639,
      "learning_rate": 1.1188905054754635e-05,
      "loss": 0.2633,
      "step": 70
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.88,
      "eval_f1": 0.8048780487804879,
      "eval_loss": 0.29483410716056824,
      "eval_precision": 0.8048780487804879,
      "eval_recall": 0.8048780487804879,
      "eval_runtime": 1.5239,
      "eval_samples_per_second": 262.477,
      "eval_steps_per_second": 16.405,
      "step": 75
    },
    {
      "epoch": 3.2,
      "grad_norm": 3.5790436267852783,
      "learning_rate": 7.45927003650309e-06,
      "loss": 0.229,
      "step": 80
    },
    {
      "epoch": 3.6,
      "grad_norm": 3.870107889175415,
      "learning_rate": 3.729635018251545e-06,
      "loss": 0.1866,
      "step": 90
    },
    {
      "epoch": 4.0,
      "grad_norm": 2.5545942783355713,
      "learning_rate": 0.0,
      "loss": 0.1838,
      "step": 100
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.885,
      "eval_f1": 0.8130081300813008,
      "eval_loss": 0.29451921582221985,
      "eval_precision": 0.8130081300813008,
      "eval_recall": 0.8130081300813008,
      "eval_runtime": 1.5767,
      "eval_samples_per_second": 253.702,
      "eval_steps_per_second": 15.856,
      "step": 100
    }
  ],
  "logging_steps": 10,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 847261481803776.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": {
    "_wandb": {},
    "assignments": {},
    "learning_rate": 3.729635018251545e-05,
    "metric": "eval/loss",
    "num_train_epochs": 4,
    "per_device_train_batch_size": 64,
    "seed": 27
  }
}