{
"best_metric": 0.5534710884094238,
"best_model_checkpoint": "wavlm-tamil-ipa/checkpoint-1000",
"epoch": 0.7136485280999108,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 2.97e-05,
"loss": 6.4757,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 5.94e-05,
"loss": 2.7305,
"step": 200
},
{
"epoch": 0.21,
"learning_rate": 8.939999999999999e-05,
"loss": 2.5809,
"step": 300
},
{
"epoch": 0.29,
"learning_rate": 0.0001191,
"loss": 2.53,
"step": 400
},
{
"epoch": 0.36,
"learning_rate": 0.00014879999999999998,
"loss": 2.0147,
"step": 500
},
{
"epoch": 0.36,
"eval_cer": 0.6800635831193839,
"eval_loss": 1.4391984939575195,
"eval_runtime": 1401.2346,
"eval_samples_per_second": 8.599,
"eval_steps_per_second": 1.075,
"step": 500
},
{
"epoch": 0.43,
"learning_rate": 0.00017849999999999997,
"loss": 0.934,
"step": 600
},
{
"epoch": 0.5,
"learning_rate": 0.00020819999999999996,
"loss": 0.597,
"step": 700
},
{
"epoch": 0.57,
"learning_rate": 0.0002382,
"loss": 0.4904,
"step": 800
},
{
"epoch": 0.64,
"learning_rate": 0.0002676,
"loss": 0.4568,
"step": 900
},
{
"epoch": 0.71,
"learning_rate": 0.00029759999999999997,
"loss": 0.3961,
"step": 1000
},
{
"epoch": 0.71,
"eval_cer": 0.3502788029525032,
"eval_loss": 0.5534710884094238,
"eval_runtime": 1403.4751,
"eval_samples_per_second": 8.585,
"eval_steps_per_second": 1.074,
"step": 1000
}
],
"max_steps": 70050,
"num_train_epochs": 50,
"total_flos": 6.366682745104583e+18,
"trial_name": null,
"trial_params": null
}