{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.506702412868632,
  "eval_steps": 200,
  "global_step": 4200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 5e-05,
      "loss": 0.0467,
      "step": 100
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.994863481875841e-05,
      "loss": 0.0433,
      "step": 200
    },
    {
      "epoch": 0.36,
      "eval_f1_score": 0.6251468860164512,
      "eval_label_f1": 0.8319623971797884,
      "eval_loss": 0.05231842026114464,
      "eval_runtime": 457.2641,
      "eval_samples_per_second": 2.187,
      "eval_steps_per_second": 0.547,
      "eval_wer": 0.10434244136543076,
      "step": 200
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.979475034558115e-05,
      "loss": 0.0402,
      "step": 300
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.9538978924776634e-05,
      "loss": 0.0391,
      "step": 400
    },
    {
      "epoch": 0.71,
      "eval_f1_score": 0.6206896551724138,
      "eval_label_f1": 0.8346394984326019,
      "eval_loss": 0.050447478890419006,
      "eval_runtime": 457.7184,
      "eval_samples_per_second": 2.185,
      "eval_steps_per_second": 0.546,
      "eval_wer": 0.10472946822509482,
      "step": 400
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.9182371575975736e-05,
      "loss": 0.037,
      "step": 500
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.8726393675266716e-05,
      "loss": 0.0381,
      "step": 600
    },
    {
      "epoch": 1.07,
      "eval_f1_score": 0.6142467886337096,
      "eval_label_f1": 0.8322304398598676,
      "eval_loss": 0.049576789140701294,
      "eval_runtime": 458.238,
      "eval_samples_per_second": 2.182,
      "eval_steps_per_second": 0.546,
      "eval_wer": 0.10654849446551591,
      "step": 600
    },
    {
      "epoch": 1.25,
      "learning_rate": 4.817291893365055e-05,
      "loss": 0.037,
      "step": 700
    },
    {
      "epoch": 1.43,
      "learning_rate": 4.752422169756048e-05,
      "loss": 0.0374,
      "step": 800
    },
    {
      "epoch": 1.43,
      "eval_f1_score": 0.6158081006685017,
      "eval_label_f1": 0.8360204482894219,
      "eval_loss": 0.04838084056973457,
      "eval_runtime": 456.7741,
      "eval_samples_per_second": 2.189,
      "eval_steps_per_second": 0.547,
      "eval_wer": 0.10705162938307919,
      "step": 800
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.678296760308474e-05,
      "loss": 0.0375,
      "step": 900
    },
    {
      "epoch": 1.79,
      "learning_rate": 4.595220262229601e-05,
      "loss": 0.0374,
      "step": 1000
    },
    {
      "epoch": 1.79,
      "eval_f1_score": 0.6155063291139241,
      "eval_label_f1": 0.8370253164556962,
      "eval_loss": 0.047435563057661057,
      "eval_runtime": 463.3137,
      "eval_samples_per_second": 2.158,
      "eval_steps_per_second": 0.54,
      "eval_wer": 0.10693552132517997,
      "step": 1000
    },
    {
      "epoch": 1.97,
      "learning_rate": 4.503534054669892e-05,
      "loss": 0.0379,
      "step": 1100
    },
    {
      "epoch": 2.14,
      "learning_rate": 4.4036148959228365e-05,
      "loss": 0.0342,
      "step": 1200
    },
    {
      "epoch": 2.14,
      "eval_f1_score": 0.6118110236220473,
      "eval_label_f1": 0.8362204724409448,
      "eval_loss": 0.04737536609172821,
      "eval_runtime": 458.8739,
      "eval_samples_per_second": 2.179,
      "eval_steps_per_second": 0.545,
      "eval_wer": 0.1077095750445081,
      "step": 1200
    },
    {
      "epoch": 2.32,
      "learning_rate": 4.2958733752443195e-05,
      "loss": 0.0345,
      "step": 1300
    },
    {
      "epoch": 2.5,
      "learning_rate": 4.180752225653292e-05,
      "loss": 0.0362,
      "step": 1400
    },
    {
      "epoch": 2.5,
      "eval_f1_score": 0.6138147566718996,
      "eval_label_f1": 0.8375196232339089,
      "eval_loss": 0.04683598503470421,
      "eval_runtime": 458.5195,
      "eval_samples_per_second": 2.181,
      "eval_steps_per_second": 0.545,
      "eval_wer": 0.10790308847434012,
      "step": 1400
    },
    {
      "epoch": 2.68,
      "learning_rate": 4.058724504646834e-05,
      "loss": 0.0343,
      "step": 1500
    },
    {
      "epoch": 2.86,
      "learning_rate": 3.9302916503054246e-05,
      "loss": 0.0351,
      "step": 1600
    },
    {
      "epoch": 2.86,
      "eval_f1_score": 0.6101960784313726,
      "eval_label_f1": 0.836078431372549,
      "eval_loss": 0.04606114700436592,
      "eval_runtime": 460.3927,
      "eval_samples_per_second": 2.172,
      "eval_steps_per_second": 0.543,
      "eval_wer": 0.10821270996207137,
      "step": 1600
    },
    {
      "epoch": 3.04,
      "learning_rate": 3.7959814207763135e-05,
      "loss": 0.0355,
      "step": 1700
    },
    {
      "epoch": 3.22,
      "learning_rate": 3.656345725602089e-05,
      "loss": 0.0339,
      "step": 1800
    },
    {
      "epoch": 3.22,
      "eval_f1_score": 0.611111111111111,
      "eval_label_f1": 0.838810641627543,
      "eval_loss": 0.046606115996837616,
      "eval_runtime": 460.841,
      "eval_samples_per_second": 2.17,
      "eval_steps_per_second": 0.542,
      "eval_wer": 0.10794179116030653,
      "step": 1800
    },
    {
      "epoch": 3.4,
      "learning_rate": 3.5119583578059846e-05,
      "loss": 0.0333,
      "step": 1900
    },
    {
      "epoch": 3.57,
      "learning_rate": 3.363412636053269e-05,
      "loss": 0.0323,
      "step": 2000
    },
    {
      "epoch": 3.57,
      "eval_f1_score": 0.6168371361132967,
      "eval_label_f1": 0.8418568056648308,
      "eval_loss": 0.04674151912331581,
      "eval_runtime": 460.8468,
      "eval_samples_per_second": 2.17,
      "eval_steps_per_second": 0.542,
      "eval_wer": 0.10879325025156746,
      "step": 2000
    },
    {
      "epoch": 3.75,
      "learning_rate": 3.211318966577581e-05,
      "loss": 0.0332,
      "step": 2100
    },
    {
      "epoch": 3.93,
      "learning_rate": 3.056302334890786e-05,
      "loss": 0.0338,
      "step": 2200
    },
    {
      "epoch": 3.93,
      "eval_f1_score": 0.6093443266588143,
      "eval_label_f1": 0.8425598743619945,
      "eval_loss": 0.0457298718392849,
      "eval_runtime": 456.4605,
      "eval_samples_per_second": 2.191,
      "eval_steps_per_second": 0.548,
      "eval_wer": 0.10856103413576902,
      "step": 2200
    },
    {
      "epoch": 4.11,
      "learning_rate": 2.8989997375834482e-05,
      "loss": 0.0333,
      "step": 2300
    },
    {
      "epoch": 4.29,
      "learning_rate": 2.7400575647692046e-05,
      "loss": 0.032,
      "step": 2400
    },
    {
      "epoch": 4.29,
      "eval_f1_score": 0.6090404440919905,
      "eval_label_f1": 0.8398096748612214,
      "eval_loss": 0.045234858989715576,
      "eval_runtime": 457.189,
      "eval_samples_per_second": 2.187,
      "eval_steps_per_second": 0.547,
      "eval_wer": 0.10852233144980261,
      "step": 2400
    },
    {
      "epoch": 4.47,
      "learning_rate": 2.5801289439291388e-05,
      "loss": 0.0335,
      "step": 2500
    },
    {
      "epoch": 4.65,
      "learning_rate": 2.419871056070862e-05,
      "loss": 0.0307,
      "step": 2600
    },
    {
      "epoch": 4.65,
      "eval_f1_score": 0.6139315230224321,
      "eval_label_f1": 0.8421881149153877,
      "eval_loss": 0.04505770280957222,
      "eval_runtime": 456.5336,
      "eval_samples_per_second": 2.19,
      "eval_steps_per_second": 0.548,
      "eval_wer": 0.10859973682173543,
      "step": 2600
    },
    {
      "epoch": 4.83,
      "learning_rate": 2.2599424352307957e-05,
      "loss": 0.0324,
      "step": 2700
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.1010002624165527e-05,
      "loss": 0.0321,
      "step": 2800
    },
    {
      "epoch": 5.0,
      "eval_f1_score": 0.6115702479338844,
      "eval_label_f1": 0.8398268398268398,
      "eval_loss": 0.04516015574336052,
      "eval_runtime": 459.143,
      "eval_samples_per_second": 2.178,
      "eval_steps_per_second": 0.544,
      "eval_wer": 0.10829011533400418,
      "step": 2800
    },
    {
      "epoch": 5.18,
      "learning_rate": 1.9436976651092144e-05,
      "loss": 0.0303,
      "step": 2900
    },
    {
      "epoch": 5.36,
      "learning_rate": 1.7886810334224192e-05,
      "loss": 0.0313,
      "step": 3000
    },
    {
      "epoch": 5.36,
      "eval_f1_score": 0.6116352201257862,
      "eval_label_f1": 0.8404088050314464,
      "eval_loss": 0.044775452464818954,
      "eval_runtime": 458.1981,
      "eval_samples_per_second": 2.182,
      "eval_steps_per_second": 0.546,
      "eval_wer": 0.10921897979719793,
      "step": 3000
    },
    {
      "epoch": 5.54,
      "learning_rate": 1.6365873639467315e-05,
      "loss": 0.0321,
      "step": 3100
    },
    {
      "epoch": 5.72,
      "learning_rate": 1.4880416421940155e-05,
      "loss": 0.0309,
      "step": 3200
    },
    {
      "epoch": 5.72,
      "eval_f1_score": 0.6109148017275227,
      "eval_label_f1": 0.8402041617589321,
      "eval_loss": 0.04491139575839043,
      "eval_runtime": 457.2993,
      "eval_samples_per_second": 2.187,
      "eval_steps_per_second": 0.547,
      "eval_wer": 0.10832881801997059,
      "step": 3200
    },
    {
      "epoch": 5.9,
      "learning_rate": 1.3436542743979125e-05,
      "loss": 0.0318,
      "step": 3300
    },
    {
      "epoch": 6.08,
      "learning_rate": 1.2040185792236874e-05,
      "loss": 0.0305,
      "step": 3400
    },
    {
      "epoch": 6.08,
      "eval_f1_score": 0.6085590891244601,
      "eval_label_f1": 0.8402041617589321,
      "eval_loss": 0.0448303148150444,
      "eval_runtime": 459.7209,
      "eval_samples_per_second": 2.175,
      "eval_steps_per_second": 0.544,
      "eval_wer": 0.10825141264803777,
      "step": 3400
    },
    {
      "epoch": 6.26,
      "learning_rate": 1.0697083496945765e-05,
      "loss": 0.0302,
      "step": 3500
    },
    {
      "epoch": 6.43,
      "learning_rate": 9.412754953531663e-06,
      "loss": 0.0301,
      "step": 3600
    },
    {
      "epoch": 6.43,
      "eval_f1_score": 0.6115702479338844,
      "eval_label_f1": 0.8374655647382919,
      "eval_loss": 0.044706329703330994,
      "eval_runtime": 459.3177,
      "eval_samples_per_second": 2.177,
      "eval_steps_per_second": 0.544,
      "eval_wer": 0.10813530459013855,
      "step": 3600
    },
    {
      "epoch": 6.61,
      "learning_rate": 8.192477743467078e-06,
      "loss": 0.0304,
      "step": 3700
    },
    {
      "epoch": 6.79,
      "learning_rate": 7.041266247556813e-06,
      "loss": 0.03,
      "step": 3800
    },
    {
      "epoch": 6.79,
      "eval_f1_score": 0.6103434662455587,
      "eval_label_f1": 0.8401105408606395,
      "eval_loss": 0.04461517930030823,
      "eval_runtime": 456.0173,
      "eval_samples_per_second": 2.193,
      "eval_steps_per_second": 0.548,
      "eval_wer": 0.10867714219366824,
      "step": 3800
    },
    {
      "epoch": 6.97,
      "learning_rate": 5.9638510407716394e-06,
      "loss": 0.0314,
      "step": 3900
    },
    {
      "epoch": 7.15,
      "learning_rate": 4.9646594533010875e-06,
      "loss": 0.0302,
      "step": 4000
    },
    {
      "epoch": 7.15,
      "eval_f1_score": 0.6120047077285209,
      "eval_label_f1": 0.8387602981561396,
      "eval_loss": 0.04448845237493515,
      "eval_runtime": 457.6573,
      "eval_samples_per_second": 2.185,
      "eval_steps_per_second": 0.546,
      "eval_wer": 0.108367520705937,
      "step": 4000
    },
    {
      "epoch": 7.33,
      "learning_rate": 4.047797377703985e-06,
      "loss": 0.0309,
      "step": 4100
    },
    {
      "epoch": 7.51,
      "learning_rate": 3.217032396915265e-06,
      "loss": 0.0294,
      "step": 4200
    },
    {
      "epoch": 7.51,
      "eval_f1_score": 0.6132075471698112,
      "eval_label_f1": 0.8396226415094339,
      "eval_loss": 0.044213637709617615,
      "eval_runtime": 455.3785,
      "eval_samples_per_second": 2.196,
      "eval_steps_per_second": 0.549,
      "eval_wer": 0.10863843950770183,
      "step": 4200
    }
  ],
  "logging_steps": 100,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 200,
  "total_flos": 1.64159538942299e+20,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}