{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.5022341376228776,
  "eval_steps": 200,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 5e-05,
      "loss": 0.0467,
      "step": 100
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.994863481875841e-05,
      "loss": 0.0433,
      "step": 200
    },
    {
      "epoch": 0.36,
      "eval_f1_score": 0.6251468860164512,
      "eval_label_f1": 0.8319623971797884,
      "eval_loss": 0.05231842026114464,
      "eval_runtime": 457.2641,
      "eval_samples_per_second": 2.187,
      "eval_steps_per_second": 0.547,
      "eval_wer": 0.10434244136543076,
      "step": 200
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.979475034558115e-05,
      "loss": 0.0402,
      "step": 300
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.9538978924776634e-05,
      "loss": 0.0391,
      "step": 400
    },
    {
      "epoch": 0.71,
      "eval_f1_score": 0.6206896551724138,
      "eval_label_f1": 0.8346394984326019,
      "eval_loss": 0.050447478890419006,
      "eval_runtime": 457.7184,
      "eval_samples_per_second": 2.185,
      "eval_steps_per_second": 0.546,
      "eval_wer": 0.10472946822509482,
      "step": 400
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.9182371575975736e-05,
      "loss": 0.037,
      "step": 500
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.8726393675266716e-05,
      "loss": 0.0381,
      "step": 600
    },
    {
      "epoch": 1.07,
      "eval_f1_score": 0.6142467886337096,
      "eval_label_f1": 0.8322304398598676,
      "eval_loss": 0.049576789140701294,
      "eval_runtime": 458.238,
      "eval_samples_per_second": 2.182,
      "eval_steps_per_second": 0.546,
      "eval_wer": 0.10654849446551591,
      "step": 600
    },
    {
      "epoch": 1.25,
      "learning_rate": 4.817291893365055e-05,
      "loss": 0.037,
      "step": 700
    },
    {
      "epoch": 1.43,
      "learning_rate": 4.752422169756048e-05,
      "loss": 0.0374,
      "step": 800
    },
    {
      "epoch": 1.43,
      "eval_f1_score": 0.6158081006685017,
      "eval_label_f1": 0.8360204482894219,
      "eval_loss": 0.04838084056973457,
      "eval_runtime": 456.7741,
      "eval_samples_per_second": 2.189,
      "eval_steps_per_second": 0.547,
      "eval_wer": 0.10705162938307919,
      "step": 800
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.678296760308474e-05,
      "loss": 0.0375,
      "step": 900
    },
    {
      "epoch": 1.79,
      "learning_rate": 4.595220262229601e-05,
      "loss": 0.0374,
      "step": 1000
    },
    {
      "epoch": 1.79,
      "eval_f1_score": 0.6155063291139241,
      "eval_label_f1": 0.8370253164556962,
      "eval_loss": 0.047435563057661057,
      "eval_runtime": 463.3137,
      "eval_samples_per_second": 2.158,
      "eval_steps_per_second": 0.54,
      "eval_wer": 0.10693552132517997,
      "step": 1000
    },
    {
      "epoch": 1.97,
      "learning_rate": 4.503534054669892e-05,
      "loss": 0.0379,
      "step": 1100
    },
    {
      "epoch": 2.14,
      "learning_rate": 4.4036148959228365e-05,
      "loss": 0.0342,
      "step": 1200
    },
    {
      "epoch": 2.14,
      "eval_f1_score": 0.6118110236220473,
      "eval_label_f1": 0.8362204724409448,
      "eval_loss": 0.04737536609172821,
      "eval_runtime": 458.8739,
      "eval_samples_per_second": 2.179,
      "eval_steps_per_second": 0.545,
      "eval_wer": 0.1077095750445081,
      "step": 1200
    },
    {
      "epoch": 2.32,
      "learning_rate": 4.2958733752443195e-05,
      "loss": 0.0345,
      "step": 1300
    },
    {
      "epoch": 2.5,
      "learning_rate": 4.180752225653292e-05,
      "loss": 0.0362,
      "step": 1400
    },
    {
      "epoch": 2.5,
      "eval_f1_score": 0.6138147566718996,
      "eval_label_f1": 0.8375196232339089,
      "eval_loss": 0.04683598503470421,
      "eval_runtime": 458.5195,
      "eval_samples_per_second": 2.181,
      "eval_steps_per_second": 0.545,
      "eval_wer": 0.10790308847434012,
      "step": 1400
    }
  ],
  "logging_steps": 100,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 200,
  "total_flos": 5.471994810085343e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}