XLSR-Wav2Vec2-arabic / trainer_state.json
{
"best_metric": 0.21690590111642744,
"best_model_checkpoint": "/content/drive/MyDrive/Graduation project/dataset/part 3/Models/asc_wav2vec2_large_xlsr_model/checkpoint-500",
"epoch": 3.0,
"eval_steps": 100,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.2857142857142856,
"grad_norm": 0.21812936663627625,
"learning_rate": 0.00023,
"loss": 4.0688,
"step": 100
},
{
"epoch": 2.2857142857142856,
"eval_cer": 1.0,
"eval_loss": 2.944187879562378,
"eval_runtime": 25.2241,
"eval_samples_per_second": 3.964,
"eval_steps_per_second": 3.964,
"eval_wer": 1.0,
"step": 100
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.5214684009552002,
"learning_rate": 0.00023,
"loss": 1.9289,
"step": 200
},
{
"epoch": 4.571428571428571,
"eval_cer": 0.09955781857162,
"eval_loss": 0.5113587379455566,
"eval_runtime": 24.8081,
"eval_samples_per_second": 4.031,
"eval_steps_per_second": 4.031,
"eval_wer": 0.4920255183413078,
"step": 200
},
{
"epoch": 6.857142857142857,
"grad_norm": 0.45686057209968567,
"learning_rate": 0.00023,
"loss": 0.3881,
"step": 300
},
{
"epoch": 6.857142857142857,
"eval_cer": 0.053463754522310065,
"eval_loss": 0.23284225165843964,
"eval_runtime": 24.918,
"eval_samples_per_second": 4.013,
"eval_steps_per_second": 4.013,
"eval_wer": 0.2711323763955343,
"step": 300
},
{
"epoch": 9.142857142857142,
"grad_norm": 0.3496246039867401,
"learning_rate": 0.00023,
"loss": 0.1989,
"step": 400
},
{
"epoch": 9.142857142857142,
"eval_cer": 0.05011389521640091,
"eval_loss": 0.20896148681640625,
"eval_runtime": 25.0682,
"eval_samples_per_second": 3.989,
"eval_steps_per_second": 3.989,
"eval_wer": 0.2567783094098884,
"step": 400
},
{
"epoch": 11.428571428571429,
"grad_norm": 1.072981357574463,
"learning_rate": 0.00023,
"loss": 0.1418,
"step": 500
},
{
"epoch": 11.428571428571429,
"eval_cer": 0.041136272276564384,
"eval_loss": 0.1919952929019928,
"eval_runtime": 24.835,
"eval_samples_per_second": 4.027,
"eval_steps_per_second": 4.027,
"eval_wer": 0.21690590111642744,
"step": 500
},
{
"epoch": 1.2,
"grad_norm": 2.501166820526123,
"learning_rate": 0.00023,
"loss": 0.9766,
"step": 600
},
{
"epoch": 1.2,
"eval_cer": 0.22892712711125207,
"eval_loss": 0.7451229095458984,
"eval_runtime": 219.3212,
"eval_samples_per_second": 9.119,
"eval_steps_per_second": 9.119,
"eval_wer": 0.7126639264043845,
"step": 600
},
{
"epoch": 1.4,
"grad_norm": 2.21364688873291,
"learning_rate": 0.00023,
"loss": 0.7679,
"step": 700
},
{
"epoch": 1.4,
"eval_cer": 0.20032114820394895,
"eval_loss": 0.6842666864395142,
"eval_runtime": 220.1309,
"eval_samples_per_second": 9.086,
"eval_steps_per_second": 9.086,
"eval_wer": 0.6376981796829125,
"step": 700
},
{
"epoch": 1.6,
"grad_norm": 1.8162668943405151,
"learning_rate": 0.00023,
"loss": 0.6767,
"step": 800
},
{
"epoch": 1.6,
"eval_cer": 0.19177702006185077,
"eval_loss": 0.6032456159591675,
"eval_runtime": 218.5745,
"eval_samples_per_second": 9.15,
"eval_steps_per_second": 9.15,
"eval_wer": 0.6313368565276962,
"step": 800
},
{
"epoch": 1.8,
"grad_norm": 1.9325016736984253,
"learning_rate": 0.00023,
"loss": 0.6254,
"step": 900
},
{
"epoch": 1.8,
"eval_cer": 0.18921972880818333,
"eval_loss": 0.5750653147697449,
"eval_runtime": 221.3428,
"eval_samples_per_second": 9.036,
"eval_steps_per_second": 9.036,
"eval_wer": 0.6109806224310042,
"step": 900
},
{
"epoch": 2.0,
"grad_norm": 1.9160747528076172,
"learning_rate": 0.00023,
"loss": 0.6156,
"step": 1000
},
{
"epoch": 2.0,
"eval_cer": 0.1742526365871065,
"eval_loss": 0.5430945158004761,
"eval_runtime": 219.1129,
"eval_samples_per_second": 9.128,
"eval_steps_per_second": 9.128,
"eval_wer": 0.5729105500097866,
"step": 1000
},
{
"epoch": 2.2,
"grad_norm": 1.2983200550079346,
"learning_rate": 0.00023,
"loss": 0.4506,
"step": 1100
},
{
"epoch": 2.2,
"eval_cer": 0.15419078582190152,
"eval_loss": 0.5200591087341309,
"eval_runtime": 220.7565,
"eval_samples_per_second": 9.06,
"eval_steps_per_second": 9.06,
"eval_wer": 0.5123311802701116,
"step": 1100
},
{
"epoch": 2.4,
"grad_norm": 1.5487444400787354,
"learning_rate": 0.00023,
"loss": 0.4399,
"step": 1200
},
{
"epoch": 2.4,
"eval_cer": 0.15298152406629134,
"eval_loss": 0.522709310054779,
"eval_runtime": 215.7214,
"eval_samples_per_second": 9.271,
"eval_steps_per_second": 9.271,
"eval_wer": 0.5041103934233705,
"step": 1200
},
{
"epoch": 2.6,
"grad_norm": 1.8711397647857666,
"learning_rate": 0.00023,
"loss": 0.4346,
"step": 1300
},
{
"epoch": 2.6,
"eval_cer": 0.153219411624772,
"eval_loss": 0.5107786059379578,
"eval_runtime": 219.26,
"eval_samples_per_second": 9.122,
"eval_steps_per_second": 9.122,
"eval_wer": 0.5042082599334508,
"step": 1300
},
{
"epoch": 2.8,
"grad_norm": 1.684812068939209,
"learning_rate": 0.00023,
"loss": 0.4148,
"step": 1400
},
{
"epoch": 2.8,
"eval_cer": 0.14915549916739354,
"eval_loss": 0.5022586584091187,
"eval_runtime": 220.0102,
"eval_samples_per_second": 9.09,
"eval_steps_per_second": 9.09,
"eval_wer": 0.4847328244274809,
"step": 1400
},
{
"epoch": 3.0,
"grad_norm": 1.5067812204360962,
"learning_rate": 0.0,
"loss": 0.4207,
"step": 1500
},
{
"epoch": 3.0,
"eval_cer": 0.1372809452065657,
"eval_loss": 0.4935847222805023,
"eval_runtime": 220.1042,
"eval_samples_per_second": 9.087,
"eval_steps_per_second": 9.087,
"eval_wer": 0.4493051477784302,
"step": 1500
},
{
"epoch": 3.0,
"step": 1500,
"total_flos": 4.297170075587716e+18,
"train_loss": 0.38818399047851565,
"train_runtime": 7019.9025,
"train_samples_per_second": 3.419,
"train_steps_per_second": 0.214
}
],
"logging_steps": 100,
"max_steps": 1500,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 4.297170075587716e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
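
The file above appears to be the standard `trainer_state.json` that the Hugging Face `Trainer` writes during fine-tuning. As an illustrative sketch (not part of the repository), one way to inspect it is to load the JSON, filter the evaluation entries out of `log_history`, and chart WER/CER against the global step; the file name, the local path, and the use of matplotlib are assumptions here.

```python
# Illustrative sketch: read trainer_state.json (assumed to be in the current
# directory) and plot the eval WER/CER entries recorded in log_history.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Evaluation entries carry "eval_wer"; training entries carry "loss" instead.
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

steps = [e["step"] for e in eval_logs]
wer = [e["eval_wer"] for e in eval_logs]
cer = [e["eval_cer"] for e in eval_logs]

plt.plot(steps, wer, marker="o", label="eval_wer")
plt.plot(steps, cer, marker="o", label="eval_cer")
plt.xlabel("global step")
plt.ylabel("error rate")
plt.title("XLSR-Wav2Vec2-arabic evaluation error rates")
plt.legend()
plt.savefig("eval_curves.png")

# best_metric / best_model_checkpoint are taken directly from the file.
print(f"best metric: {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
```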