{
  "best_metric": 0.7326732673267327,
  "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-0/checkpoint-24",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 72,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21,
      "grad_norm": 1.6479910612106323,
      "learning_rate": 3.3535706116592527e-06,
      "loss": 0.699,
      "step": 5
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.6159653663635254,
      "learning_rate": 6.707141223318505e-06,
      "loss": 0.6932,
      "step": 10
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.5607424974441528,
      "learning_rate": 1.0060711834977758e-05,
      "loss": 0.6812,
      "step": 15
    },
    {
      "epoch": 0.83,
      "grad_norm": 1.401130199432373,
      "learning_rate": 1.341428244663701e-05,
      "loss": 0.6633,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.680633544921875,
      "eval_runtime": 1.3853,
      "eval_samples_per_second": 46.201,
      "eval_steps_per_second": 5.775,
      "step": 24
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.9690802693367004,
      "learning_rate": 1.602261514459421e-05,
      "loss": 0.6644,
      "step": 25
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.8331551551818848,
      "learning_rate": 1.564999618774318e-05,
      "loss": 0.6073,
      "step": 30
    },
    {
      "epoch": 1.46,
      "grad_norm": 1.799914002418518,
      "learning_rate": 1.527737723089215e-05,
      "loss": 0.5805,
      "step": 35
    },
    {
      "epoch": 1.67,
      "grad_norm": 0.515367865562439,
      "learning_rate": 1.4904758274041123e-05,
      "loss": 0.6465,
      "step": 40
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.6179113388061523,
      "learning_rate": 1.4532139317190096e-05,
      "loss": 0.6147,
      "step": 45
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.701690673828125,
      "eval_runtime": 1.4198,
      "eval_samples_per_second": 45.078,
      "eval_steps_per_second": 5.635,
      "step": 48
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.5513622760772705,
      "learning_rate": 1.4159520360339068e-05,
      "loss": 0.6301,
      "step": 50
    },
    {
      "epoch": 2.29,
      "grad_norm": 0.5830497741699219,
      "learning_rate": 1.3786901403488039e-05,
      "loss": 0.5836,
      "step": 55
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.6891571879386902,
      "learning_rate": 1.341428244663701e-05,
      "loss": 0.6309,
      "step": 60
    },
    {
      "epoch": 2.71,
      "grad_norm": 1.3063991069793701,
      "learning_rate": 1.3041663489785983e-05,
      "loss": 0.6199,
      "step": 65
    },
    {
      "epoch": 2.92,
      "grad_norm": 1.164373517036438,
      "learning_rate": 1.2669044532934955e-05,
      "loss": 0.4871,
      "step": 70
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.7076148986816406,
      "eval_runtime": 1.4157,
      "eval_samples_per_second": 45.206,
      "eval_steps_per_second": 5.651,
      "step": 72
    }
  ],
  "logging_steps": 5,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 2449725503657472.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.6097138935964413e-05,
    "per_device_train_batch_size": 8
  }
}