whisper-small-Arabic / checkpoint-1000 / trainer_state (2).json
{
"best_metric": 51.185382425856204,
"best_model_checkpoint": "./whisper-small-Arabicc1/checkpoint-1000",
"epoch": 0.41562759767248547,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.2000000000000006e-07,
"loss": 3.262,
"step": 25
},
{
"epoch": 0.02,
"learning_rate": 9.200000000000001e-07,
"loss": 2.5218,
"step": 50
},
{
"epoch": 0.03,
"learning_rate": 1.42e-06,
"loss": 1.7958,
"step": 75
},
{
"epoch": 0.04,
"learning_rate": 1.9200000000000003e-06,
"loss": 1.2313,
"step": 100
},
{
"epoch": 0.05,
"learning_rate": 2.42e-06,
"loss": 1.0969,
"step": 125
},
{
"epoch": 0.06,
"learning_rate": 2.92e-06,
"loss": 0.9609,
"step": 150
},
{
"epoch": 0.07,
"learning_rate": 3.4200000000000007e-06,
"loss": 0.9173,
"step": 175
},
{
"epoch": 0.08,
"learning_rate": 3.920000000000001e-06,
"loss": 0.8191,
"step": 200
},
{
"epoch": 0.09,
"learning_rate": 4.42e-06,
"loss": 0.7742,
"step": 225
},
{
"epoch": 0.1,
"learning_rate": 4.92e-06,
"loss": 0.6781,
"step": 250
},
{
"epoch": 0.11,
"learning_rate": 5.420000000000001e-06,
"loss": 0.5776,
"step": 275
},
{
"epoch": 0.12,
"learning_rate": 5.92e-06,
"loss": 0.4557,
"step": 300
},
{
"epoch": 0.14,
"learning_rate": 6.42e-06,
"loss": 0.4113,
"step": 325
},
{
"epoch": 0.15,
"learning_rate": 6.92e-06,
"loss": 0.4198,
"step": 350
},
{
"epoch": 0.16,
"learning_rate": 7.420000000000001e-06,
"loss": 0.393,
"step": 375
},
{
"epoch": 0.17,
"learning_rate": 7.92e-06,
"loss": 0.4105,
"step": 400
},
{
"epoch": 0.18,
"learning_rate": 8.42e-06,
"loss": 0.4266,
"step": 425
},
{
"epoch": 0.19,
"learning_rate": 8.920000000000001e-06,
"loss": 0.4303,
"step": 450
},
{
"epoch": 0.2,
"learning_rate": 9.42e-06,
"loss": 0.3524,
"step": 475
},
{
"epoch": 0.21,
"learning_rate": 9.920000000000002e-06,
"loss": 0.3636,
"step": 500
},
{
"epoch": 0.22,
"learning_rate": 9.58e-06,
"loss": 0.3609,
"step": 525
},
{
"epoch": 0.23,
"learning_rate": 9.080000000000001e-06,
"loss": 0.3359,
"step": 550
},
{
"epoch": 0.24,
"learning_rate": 8.580000000000001e-06,
"loss": 0.3273,
"step": 575
},
{
"epoch": 0.25,
"learning_rate": 8.08e-06,
"loss": 0.3641,
"step": 600
},
{
"epoch": 0.26,
"learning_rate": 7.58e-06,
"loss": 0.3759,
"step": 625
},
{
"epoch": 0.27,
"learning_rate": 7.08e-06,
"loss": 0.3445,
"step": 650
},
{
"epoch": 0.28,
"learning_rate": 6.5800000000000005e-06,
"loss": 0.3277,
"step": 675
},
{
"epoch": 0.29,
"learning_rate": 6.08e-06,
"loss": 0.3411,
"step": 700
},
{
"epoch": 0.3,
"learning_rate": 5.580000000000001e-06,
"loss": 0.3377,
"step": 725
},
{
"epoch": 0.31,
"learning_rate": 5.0800000000000005e-06,
"loss": 0.3398,
"step": 750
},
{
"epoch": 0.32,
"learning_rate": 4.58e-06,
"loss": 0.3125,
"step": 775
},
{
"epoch": 0.33,
"learning_rate": 4.08e-06,
"loss": 0.3291,
"step": 800
},
{
"epoch": 0.34,
"learning_rate": 3.58e-06,
"loss": 0.3104,
"step": 825
},
{
"epoch": 0.35,
"learning_rate": 3.08e-06,
"loss": 0.299,
"step": 850
},
{
"epoch": 0.36,
"learning_rate": 2.5800000000000003e-06,
"loss": 0.2963,
"step": 875
},
{
"epoch": 0.37,
"learning_rate": 2.08e-06,
"loss": 0.3068,
"step": 900
},
{
"epoch": 0.38,
"learning_rate": 1.5800000000000001e-06,
"loss": 0.3393,
"step": 925
},
{
"epoch": 0.39,
"learning_rate": 1.08e-06,
"loss": 0.3138,
"step": 950
},
{
"epoch": 0.41,
"learning_rate": 5.800000000000001e-07,
"loss": 0.3214,
"step": 975
},
{
"epoch": 0.42,
"learning_rate": 8e-08,
"loss": 0.2726,
"step": 1000
},
{
"epoch": 0.42,
"eval_loss": 0.38373443484306335,
"eval_runtime": 4844.9923,
"eval_samples_per_second": 2.155,
"eval_steps_per_second": 0.269,
"eval_wer": 51.185382425856204,
"step": 1000
}
],
"max_steps": 1000,
"num_train_epochs": 1,
"total_flos": 4.61736640512e+18,
"trial_name": null,
"trial_params": null
}
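
A minimal sketch of how a trainer_state.json like the one above could be read and summarized. It assumes only the Python standard library; the field names (best_metric, best_model_checkpoint, log_history, eval_wer, ...) are taken directly from the file, while the file path and variable names are illustrative, not part of the checkpoint itself.

# Sketch: load a trainer_state.json and print the best WER plus the logged curves.
# The path below is an assumption; adjust it to wherever the checkpoint lives.
import json

with open("checkpoint-1000/trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Best checkpoint info recorded by the Trainer (the tracked metric here is WER).
print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best WER:        {state['best_metric']:.2f}")

# log_history mixes training and evaluation entries; split them by their keys.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

for e in train_logs:
    print(f"step {e['step']:>5}  lr {e['learning_rate']:.2e}  loss {e['loss']:.4f}")

for e in eval_logs:
    print(f"step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}  WER {e['eval_wer']:.2f}")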