Lemswasabi committed
Commit 4688492
1 Parent(s): 8835b02

add model without lm
README.md ADDED
@@ -0,0 +1,119 @@
---
tags:
- automatic-speech-recognition
- Lemswasabi/tuudle
- generated_from_trainer
datasets:
- tuudle
model-index:
- name: ''
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

#

This model is a fine-tuned version of [Lemswasabi/letzspeak](https://huggingface.co/Lemswasabi/letzspeak) on the LEMSWASABI/TUUDLE - RTL dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1058
- Wer: 0.1075
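Since the rest of the card is still to be filled in, a minimal transcription sketch may help. It assumes the checkpoint has been pushed to the Hub (the repo id below is a placeholder, since the card's model name is empty) and that the input is 16 kHz mono audio, as required by the feature extractor:

```python
from transformers import pipeline

# Placeholder repo id; substitute the actual Hub id of this checkpoint.
asr = pipeline("automatic-speech-recognition", model="Lemswasabi/<repo-id>")

# sample.wav should be 16 kHz mono, matching preprocessor_config.json.
print(asr("sample.wav")["text"])
```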
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 7.5e-05
- train_batch_size: 3
- eval_batch_size: 3
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 12
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 50.0
- mixed_precision_training: Native AMP
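The total train batch size follows from train_batch_size x gradient_accumulation_steps = 3 x 4 = 12. As a sketch, the list above corresponds roughly to the following `transformers.TrainingArguments` (v4.20-era API; flags not listed are left at their defaults):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./",                 # mirrors run.sh
    learning_rate=7.5e-5,
    per_device_train_batch_size=3,
    per_device_eval_batch_size=3,
    gradient_accumulation_steps=4,   # effective train batch size: 3 * 4 = 12
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=2000,
    num_train_epochs=50.0,
    fp16=True,                       # "Native AMP" mixed precision
)
```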
### Training results

| Training Loss | Epoch | Step  | Validation Loss | Wer    |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 3.1484        | 0.89  | 500   | 3.0844          | 1.0    |
| 2.6539        | 1.77  | 1000  | 1.7272          | 0.9358 |
| 0.8732        | 2.66  | 1500  | 0.1975          | 0.1609 |
| 0.8075        | 3.55  | 2000  | 0.1483          | 0.1468 |
| 0.7358        | 4.43  | 2500  | 0.1331          | 0.1401 |
| 0.7079        | 5.32  | 3000  | 0.1273          | 0.1364 |
| 0.7032        | 6.21  | 3500  | 0.1133          | 0.1240 |
| 0.7129        | 7.09  | 4000  | 0.1124          | 0.1290 |
| 0.6771        | 7.98  | 4500  | 0.1121          | 0.1300 |
| 0.6859        | 8.86  | 5000  | 0.1095          | 0.1313 |
| 0.6496        | 9.75  | 5500  | 0.1091          | 0.1250 |
| 0.6431        | 10.64 | 6000  | 0.1102          | 0.1293 |
| 0.6422        | 11.52 | 6500  | 0.1107          | 0.1179 |
| 0.6334        | 12.41 | 7000  | 0.1049          | 0.1236 |
| 0.599         | 13.3  | 7500  | 0.1092          | 0.1152 |
| 0.6205        | 14.18 | 8000  | 0.1047          | 0.1219 |
| 0.5944        | 15.07 | 8500  | 0.1068          | 0.1203 |
| 0.6102        | 15.96 | 9000  | 0.1056          | 0.1159 |
| 0.5983        | 16.84 | 9500  | 0.1061          | 0.1152 |
| 0.5882        | 17.73 | 10000 | 0.1043          | 0.1135 |
| 0.5876        | 18.62 | 10500 | 0.1023          | 0.1159 |
| 0.5717        | 19.5  | 11000 | 0.1037          | 0.1233 |
| 0.5537        | 20.39 | 11500 | 0.1070          | 0.1192 |
| 0.5636        | 21.28 | 12000 | 0.1036          | 0.1169 |
| 0.5536        | 22.16 | 12500 | 0.1008          | 0.1182 |
| 0.5656        | 23.05 | 13000 | 0.1010          | 0.1172 |
| 0.5504        | 23.94 | 13500 | 0.1019          | 0.1105 |
| 0.5476        | 24.82 | 14000 | 0.1026          | 0.1166 |
| 0.5375        | 25.71 | 14500 | 0.1107          | 0.1189 |
| 0.5318        | 26.6  | 15000 | 0.1051          | 0.1142 |
| 0.5278        | 27.48 | 15500 | 0.1049          | 0.1166 |
| 0.5204        | 28.37 | 16000 | 0.1081          | 0.1182 |
| 0.512         | 29.26 | 16500 | 0.1062          | 0.1156 |
| 0.5082        | 30.14 | 17000 | 0.1045          | 0.1135 |
| 0.5193        | 31.03 | 17500 | 0.1091          | 0.1145 |
| 0.5129        | 31.91 | 18000 | 0.1040          | 0.1088 |
| 0.5126        | 32.8  | 18500 | 0.1085          | 0.1169 |
| 0.496         | 33.69 | 19000 | 0.1070          | 0.1166 |
| 0.5017        | 34.57 | 19500 | 0.1119          | 0.1162 |
| 0.4808        | 35.46 | 20000 | 0.1101          | 0.1139 |
| 0.4939        | 36.35 | 20500 | 0.1081          | 0.1125 |
| 0.4738        | 37.23 | 21000 | 0.1091          | 0.1098 |
| 0.4978        | 38.12 | 21500 | 0.1057          | 0.1092 |
| 0.4972        | 39.01 | 22000 | 0.1074          | 0.1105 |
| 0.4773        | 39.89 | 22500 | 0.1062          | 0.1108 |
| 0.4741        | 40.78 | 23000 | 0.1057          | 0.1085 |
| 0.4776        | 41.67 | 23500 | 0.1077          | 0.1085 |
| 0.4637        | 42.55 | 24000 | 0.1061          | 0.1095 |
| 0.4853        | 43.44 | 24500 | 0.1081          | 0.1075 |
| 0.4602        | 44.33 | 25000 | 0.1076          | 0.1085 |
| 0.4667        | 45.21 | 25500 | 0.1078          | 0.1078 |
| 0.4484        | 46.1  | 26000 | 0.1056          | 0.1082 |
| 0.4601        | 46.99 | 26500 | 0.1066          | 0.1078 |
| 0.4691        | 47.87 | 27000 | 0.1068          | 0.1085 |
| 0.4457        | 48.76 | 27500 | 0.1066          | 0.1078 |
| 0.475         | 49.65 | 28000 | 0.1060          | 0.1082 |


### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.2.1
- Tokenizers 0.12.1
added_tokens.json ADDED
@@ -0,0 +1 @@
{"<s>": 57, "</s>": 58}
all_results.json ADDED
@@ -0,0 +1,14 @@
{
    "epoch": 50.0,
    "eval_loss": 0.10580451041460037,
    "eval_runtime": 31.0996,
    "eval_samples": 178,
    "eval_samples_per_second": 5.724,
    "eval_steps_per_second": 1.929,
    "eval_wer": 0.10749076251259658,
    "train_loss": 0.6889870901987062,
    "train_runtime": 132713.9902,
    "train_samples": 6770,
    "train_samples_per_second": 2.551,
    "train_steps_per_second": 0.212
}
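For reference, `eval_wer` is the word error rate, WER = (S + D + I) / N for S substitutions, D deletions, and I insertions against N reference words; 0.1075 therefore amounts to roughly one word-level error per nine reference words.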
config.json ADDED
@@ -0,0 +1,115 @@
{
  "_name_or_path": "Lemswasabi/letzspeak",
  "activation_dropout": 0.1,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
  "codevector_dim": 768,
  "contrastive_logits_temperature": 0.1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "diversity_loss_weight": 0.1,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.0,
  "feat_quantizer_dropout": 0.0,
  "final_dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_dropout": 0.0,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.0,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 64,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.25,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.75,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_adapter_layers": 3,
  "num_attention_heads": 16,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "num_negatives": 100,
  "output_hidden_size": 1024,
  "pad_token_id": 56,
  "proj_codevector_dim": 768,
  "tdnn_dilation": [
    1,
    2,
    3,
    1,
    1
  ],
  "tdnn_dim": [
    512,
    512,
    512,
    512,
    1500
  ],
  "tdnn_kernel": [
    5,
    3,
    3,
    1,
    1
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.20.0.dev0",
  "use_weighted_layer_sum": false,
  "vocab_size": 59,
  "xvector_output_dim": 512
}
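A detail worth noting from the config above: the seven `conv_stride` values multiply to 5 x 2^6 = 320, so the feature encoder emits one frame per 320 input samples, i.e. one CTC frame every 20 ms at the 16 kHz rate fixed in preprocessor_config.json below.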
eval_results.json ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 50.0,
    "eval_loss": 0.10580451041460037,
    "eval_runtime": 31.0996,
    "eval_samples": 178,
    "eval_samples_per_second": 5.724,
    "eval_steps_per_second": 1.929,
    "eval_wer": 0.10749076251259658
}
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
{
    "do_normalize": true,
    "feature_extractor_type": "Wav2Vec2FeatureExtractor",
    "feature_size": 1,
    "padding_side": "right",
    "padding_value": 0,
    "return_attention_mask": true,
    "sampling_rate": 16000
}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d35b9085ec4fe5c026ae1207feb043e4df0016d99055a3a68d93bf0255c37980
size 1262140593
run.sh ADDED
@@ -0,0 +1,37 @@
python run_speech_recognition_ctc.py \
    --dataset_name="Lemswasabi/tuudle" \
    --model_name_or_path="Lemswasabi/letzspeak" \
    --dataset_config_name="rtl" \
    --train_split_name="train" \
    --eval_split_name="validation" \
    --output_dir="./" \
    --overwrite_output_dir \
    --num_train_epochs="50" \
    --per_device_train_batch_size="3" \
    --per_device_eval_batch_size="3" \
    --gradient_accumulation_steps="4" \
    --learning_rate="7.5e-5" \
    --warmup_steps="2000" \
    --length_column_name="input_length" \
    --evaluation_strategy="steps" \
    --text_column_name="sentence" \
    --chars_to_replace \- \
    --chars_to_ignore , ? . ! \; \: \" “ % ‘ „ ” � — ’ … – \
    --save_steps="500" \
    --eval_steps="500" \
    --logging_steps="100" \
    --layerdrop="0.0" \
    --activation_dropout="0.1" \
    --save_total_limit="3" \
    --freeze_feature_encoder \
    --feat_proj_dropout="0.0" \
    --mask_time_prob="0.75" \
    --mask_time_length="10" \
    --mask_feature_prob="0.25" \
    --mask_feature_length="64" \
    --gradient_checkpointing \
    --use_auth_token \
    --fp16 \
    --group_by_length \
    --do_train --do_eval \
    --push_to_hub
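The two text-normalization flags are consumed by run_speech_recognition_ctc.py below, which compiles each character list into a regex character class; a small sketch of their combined effect (the sample sentence is made up):

```python
import re

# Regexes as built by run_speech_recognition_ctc.py from the flags above.
chars_to_replace_regex = r"[\-]"               # --chars_to_replace \-
chars_to_ignore_regex = '[,?.!;:"“%‘„”�—’…–]'  # --chars_to_ignore ...

# Made-up example sentence, purely illustrative.
text = "Moien, d'Welt: e Beispill-Saz!"
text = re.sub(chars_to_replace_regex, " ", text).lower() + " "  # hyphens become spaces
target_text = re.sub(chars_to_ignore_regex, "", text)           # punctuation is dropped
print(target_text)  # "moien d'welt e beispill saz "
```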
run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,780 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""

import functools
import json
import logging
import os
import re
import sys
import warnings
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union

import datasets
import numpy as np
import torch
from datasets import DatasetDict, load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForCTC,
    AutoProcessor,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Processor,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.20.0.dev0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")


logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    tokenizer_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
    hidden_dropout: float = field(
        default=0.0,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    final_dropout: float = field(
        default=0.0,
        metadata={"help": "The dropout probability for the final projection layer."},
    )
    mask_time_prob: float = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis."
            )
        },
    )
    mask_time_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the time axis."},
    )
    mask_feature_prob: float = field(
        default=0.0,
        metadata={
            "help": (
                "Probability of each feature vector along the feature axis to be chosen as the start of the vector span"
                " to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature"
                " bins will be masked along the time axis."
            )
        },
    )
    mask_feature_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the feature axis."},
    )
    layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
    ctc_loss_reduction: Optional[str] = field(
        default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    dataset_name: str = field(
        metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: str = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: str = field(
        default="train+validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to "
                "'train+validation'"
            )
        },
    )
    eval_split_name: str = field(
        default="test",
        metadata={
            "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: Optional[List[str]] = list_field(
        default=None,
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
    chars_to_replace: Optional[List[str]] = list_field(
        default=None,
        metadata={"help": "A list of characters to replace from the transcripts."},
    )
    eval_metrics: List[str] = list_field(
        default=["wer"],
        metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
    )
    max_duration_in_seconds: float = field(
        default=20.0,
        metadata={
            "help": (
                "Filter audio files that are longer than `max_duration_in_seconds` seconds to"
                " `max_duration_in_seconds`"
            )
        },
    )
    min_duration_in_seconds: float = field(
        default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to only do data preprocessing and skip training. This is especially useful when data"
                " preprocessing errors out in distributed training due to timeout. In this case, one should run the"
                " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
                " can consequently be loaded in distributed training"
            )
        },
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "If :obj:`True`, will use the token generated when running"
                ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
            )
        },
    )
    unk_token: str = field(
        default="[UNK]",
        metadata={"help": "The unk token for the tokenizer"},
    )
    pad_token: str = field(
        default="[PAD]",
        metadata={"help": "The padding token for the tokenizer"},
    )
    word_delimiter_token: str = field(
        default="|",
        metadata={"help": "The word delimiter token for the tokenizer"},
    )
    phoneme_language: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The target language that should be"
                " passed to the tokenizer for tokenization. Note that"
                " this is only relevant if the model classifies the"
                " input audio to a sequence of phoneme sequences."
            )
        },
    )


@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.AutoProcessor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: AutoProcessor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch


def create_vocabulary_from_data(
    datasets: DatasetDict,
    word_delimiter_token: Optional[str] = None,
    unk_token: Optional[str] = None,
    pad_token: Optional[str] = None,
):
    # Given training and test labels create vocabulary
    def extract_all_chars(batch):
        all_text = " ".join(batch["target_text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocabs = datasets.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=datasets["train"].column_names,
    )

    # take union of all unique characters in each dataset
    vocab_set = functools.reduce(
        lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
    )

    vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}

    # replace white space with delimiter token
    if word_delimiter_token is not None:
        vocab_dict[word_delimiter_token] = vocab_dict[" "]
        del vocab_dict[" "]

    # add unk and pad token
    if unk_token is not None:
        vocab_dict[unk_token] = len(vocab_dict)

    if pad_token is not None:
        vocab_dict[pad_token] = len(vocab_dict)

    return vocab_dict


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # 1. First, let's load the dataset
    raw_datasets = DatasetDict()

    if training_args.do_train:
        raw_datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=data_args.train_split_name,
            use_auth_token=data_args.use_auth_token,
        )

        if data_args.audio_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'."
                " Make sure to set `--audio_column_name` to the correct audio column - one of"
                f" {', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.text_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--text_column_name` to the correct text column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.max_train_samples is not None:
            raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))

    if training_args.do_eval:
        raw_datasets["eval"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=data_args.eval_split_name,
            use_auth_token=data_args.use_auth_token,
        )

        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))

    # 2. We remove some special characters from the datasets
    # that make training complicated and do not help in transcribing the speech
    # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
    # that could be easily picked up by the model
    chars_to_replace_regex = (
        f'[{"".join(data_args.chars_to_replace)}]' if data_args.chars_to_replace is not None else None
    )
    chars_to_ignore_regex = (
        f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
    )
    text_column_name = data_args.text_column_name

    def replace_special_characters(batch):
        if chars_to_replace_regex is not None:
            batch[text_column_name] = re.sub(chars_to_replace_regex, " ", batch[text_column_name]).lower() + " "
        else:
            batch[text_column_name] = batch[text_column_name].lower() + " "
        return batch

    def remove_special_characters(batch):
        if chars_to_ignore_regex is not None:
            batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
        else:
            batch["target_text"] = batch[text_column_name].lower() + " "
        return batch

    with training_args.main_process_first(desc="dataset map special characters removal"):
        raw_datasets = raw_datasets.map(
            replace_special_characters,
            desc="replace special characters from datasets",
        )
        raw_datasets = raw_datasets.map(
            remove_special_characters,
            remove_columns=[text_column_name],
            desc="remove special characters from datasets",
        )

    # save special tokens for tokenizer
    word_delimiter_token = data_args.word_delimiter_token
    unk_token = data_args.unk_token
    pad_token = data_args.pad_token

    # 3. Next, let's load the config as we might need it to create
    # the tokenizer
    # load config
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # 4. Next, if no tokenizer file is defined,
    # we create the vocabulary of the model by extracting all unique characters from
    # the training and evaluation datasets
    # We need to make sure that only first rank saves vocabulary
    # make sure all processes wait until vocab is created
    tokenizer_name_or_path = model_args.tokenizer_name_or_path
    tokenizer_kwargs = {}
    if tokenizer_name_or_path is None:
        # save vocab in training output dir
        tokenizer_name_or_path = training_args.output_dir

        vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")

        with training_args.main_process_first():
            if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
                os.remove(vocab_file)

        with training_args.main_process_first(desc="dataset map vocabulary creation"):
            if not os.path.isfile(vocab_file):
                os.makedirs(tokenizer_name_or_path, exist_ok=True)
                vocab_dict = create_vocabulary_from_data(
                    raw_datasets,
                    word_delimiter_token=word_delimiter_token,
                    unk_token=unk_token,
                    pad_token=pad_token,
                )

                # save vocab dict to be loaded into tokenizer
                with open(vocab_file, "w") as file:
                    json.dump(vocab_dict, file)

        # if tokenizer has just been created
        # it is defined by `tokenizer_class` if present in config else by `model_type`
        tokenizer_kwargs = {
            "config": config if config.tokenizer_class is not None else None,
            "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
            "unk_token": unk_token,
            "pad_token": pad_token,
            "word_delimiter_token": word_delimiter_token,
        }

    # 5. Now we can instantiate the feature extractor, tokenizer and model
    # Note for distributed training, the .from_pretrained methods guarantee that only
    # one local process can concurrently download model & vocab.

    # load feature_extractor and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path,
        use_auth_token=data_args.use_auth_token,
        **tokenizer_kwargs,
    )
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # adapt config
    config.update(
        {
            "feat_proj_dropout": model_args.feat_proj_dropout,
            "attention_dropout": model_args.attention_dropout,
            "hidden_dropout": model_args.hidden_dropout,
            "final_dropout": model_args.final_dropout,
            "mask_time_prob": model_args.mask_time_prob,
            "mask_time_length": model_args.mask_time_length,
            "mask_feature_prob": model_args.mask_feature_prob,
            "mask_feature_length": model_args.mask_feature_length,
            "gradient_checkpointing": training_args.gradient_checkpointing,
            "layerdrop": model_args.layerdrop,
            "ctc_loss_reduction": model_args.ctc_loss_reduction,
            "pad_token_id": tokenizer.pad_token_id,
            "vocab_size": len(tokenizer),
            "activation_dropout": model_args.activation_dropout,
        }
    )

    # create model
    model = AutoModelForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        config=config,
        use_auth_token=data_args.use_auth_token,
        ignore_mismatched_sizes=True,
    )

    # freeze encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
    # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
    # so that we just need to set the correct target sampling rate and normalize the input
    # via the `feature_extractor`

    # make sure that dataset decodes audio with correct sampling rate
    dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
    if dataset_sampling_rate != feature_extractor.sampling_rate:
        raw_datasets = raw_datasets.cast_column(
            data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
        )

    # derive max & min input length for sample rate & max duration
    max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
    min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
    audio_column_name = data_args.audio_column_name
    num_workers = data_args.preprocessing_num_workers

    # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
    phoneme_language = data_args.phoneme_language

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def prepare_dataset(batch):
        # load audio
        sample = batch[audio_column_name]

        inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
        batch["input_values"] = inputs.input_values[0]
        batch["input_length"] = len(batch["input_values"])

        # encode targets
        additional_kwargs = {}
        if phoneme_language is not None:
            additional_kwargs["phonemizer_lang"] = phoneme_language

        batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
        return batch

    with training_args.main_process_first(desc="dataset map preprocessing"):
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            remove_columns=next(iter(raw_datasets.values())).column_names,
            num_proc=num_workers,
            desc="preprocess datasets",
        )

        def is_audio_in_length_range(length):
            return length > min_input_length and length < max_input_length

        # filter data that is shorter than min_input_length
        vectorized_datasets = vectorized_datasets.filter(
            is_audio_in_length_range,
            num_proc=num_workers,
            input_columns=["input_length"],
        )

    # 7. Next, we can prepare the training.
    # Let's use word error rate (WER) as our evaluation metric,
    # instantiate a data collator and the trainer

    # Define evaluation metrics during training, *i.e.* word error rate, character error rate
    eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}

    # for large datasets it is advised to run the preprocessing on a
    # single machine first with ``args.preprocessing_only`` since there will most likely
    # be a timeout when running the script in distributed mode.
    # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
    # cached dataset
    if data_args.preprocessing_only:
        logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
        return

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id

        pred_str = tokenizer.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)

        metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}

        return metrics

    # Now save everything to be able to create a single processor later
    if is_main_process(training_args.local_rank):
        # save feature extractor, tokenizer and config
        feature_extractor.save_pretrained(training_args.output_dir)
        tokenizer.save_pretrained(training_args.output_dir)
        config.save_pretrained(training_args.output_dir)

    try:
        processor = AutoProcessor.from_pretrained(training_args.output_dir)
    except (OSError, KeyError):
        warnings.warn(
            "Loading a processor from a feature extractor config that does not"
            " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
            " attribute to your `preprocessor_config.json` file to suppress this warning: "
            " `'processor_class': 'Wav2Vec2Processor'`",
            FutureWarning,
        )
        processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)

    # Instantiate custom data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor)

    # Initialize Trainer
    trainer = Trainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
        eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
        tokenizer=feature_extractor,
    )

    # 8. Finally, we can start training

    # Training
    if training_args.do_train:

        # use last checkpoint if exist
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples
            if data_args.max_train_samples is not None
            else len(vectorized_datasets["train"])
        )
        metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = (
            data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
        )
        metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "speech-recognition",
        "tags": ["automatic-speech-recognition", data_args.dataset_name],
        "dataset_args": (
            f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:"
            f" {data_args.eval_split_name}"
        ),
        "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
    }
    if "common_voice" in data_args.dataset_name:
        kwargs["language"] = config_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

    return results


if __name__ == "__main__":
    main()
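To make the character-level vocabulary concrete, here is a toy run of the `create_vocabulary_from_data` logic above on a hypothetical two-word corpus; the real training data yields the 59-symbol vocabulary recorded in config.json, with `[PAD]` at index 56 and the `<s>`/`</s>` entries from added_tokens.json on top:

```python
# Toy illustration of create_vocabulary_from_data: characters are sorted,
# whitespace is remapped to the word delimiter, then [UNK]/[PAD] are appended.
all_text = "moien welt"  # made-up corpus
vocab_dict = {c: i for i, c in enumerate(sorted(set(all_text)))}
vocab_dict["|"] = vocab_dict.pop(" ")  # word_delimiter_token replaces " "
vocab_dict["[UNK]"] = len(vocab_dict)
vocab_dict["[PAD]"] = len(vocab_dict)  # [PAD] lands last, cf. pad_token_id 56
print(vocab_dict)
```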
runs/May21_09-02-48_bioman2/1653117325.7043028/events.out.tfevents.1653117325.bioman2.19214.1 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82fb10ff9df3efb104856e487ef603c660eb2f153aa260fd17fbe81016b0af6c
size 5120
runs/May21_09-02-48_bioman2/events.out.tfevents.1653117325.bioman2.19214.0 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a7c807f6a57e6444980141ea9ca5f0c2b26ea07cfe67afcbc2a3bb0ec031e0b
size 68029
runs/May21_09-02-48_bioman2/events.out.tfevents.1653250459.bioman2.19214.2 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e0d53e20940f64d80f7ff85ddefa514142b54a7fec3fbf3af653b1603f5407c
size 364
special_tokens_map.json ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 50.0,
    "train_loss": 0.6889870901987062,
    "train_runtime": 132713.9902,
    "train_samples": 6770,
    "train_samples_per_second": 2.551,
    "train_steps_per_second": 0.212
}
trainer_state.json ADDED
@@ -0,0 +1,2221 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 49.999556933983165,
  "global_step": 28200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 3.7125e-06,
      "loss": 9.6392,
      "step": 100
    },
    {
      "epoch": 0.35,
      "learning_rate": 7.4625e-06,
      "loss": 5.3472,
      "step": 200
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.1212499999999998e-05,
      "loss": 3.7052,
      "step": 300
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.49625e-05,
      "loss": 3.35,
      "step": 400
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.8712499999999997e-05,
      "loss": 3.1484,
      "step": 500
    },
    {
      "epoch": 0.89,
      "eval_loss": 3.084352731704712,
      "eval_runtime": 31.0524,
      "eval_samples_per_second": 5.732,
      "eval_steps_per_second": 1.932,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 1.06,
      "learning_rate": 2.2462499999999997e-05,
      "loss": 3.0531,
      "step": 600
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.6212499999999997e-05,
      "loss": 2.9911,
      "step": 700
    },
    {
      "epoch": 1.42,
      "learning_rate": 2.99625e-05,
      "loss": 2.9576,
      "step": 800
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.37125e-05,
      "loss": 2.9406,
      "step": 900
    },
    {
      "epoch": 1.77,
      "learning_rate": 3.7462499999999996e-05,
      "loss": 2.6539,
      "step": 1000
    },
    {
      "epoch": 1.77,
      "eval_loss": 1.7271525859832764,
      "eval_runtime": 30.9027,
      "eval_samples_per_second": 5.76,
      "eval_steps_per_second": 1.942,
      "eval_wer": 0.9358414511252939,
      "step": 1000
    },
    {
      "epoch": 1.95,
      "learning_rate": 4.12125e-05,
      "loss": 1.5429,
      "step": 1100
    },
    {
      "epoch": 2.13,
      "learning_rate": 4.4962499999999995e-05,
      "loss": 1.0875,
      "step": 1200
    },
    {
      "epoch": 2.3,
      "learning_rate": 4.871249999999999e-05,
      "loss": 0.9728,
      "step": 1300
    },
    {
      "epoch": 2.48,
      "learning_rate": 5.2424999999999994e-05,
      "loss": 0.9117,
      "step": 1400
    },
    {
      "epoch": 2.66,
      "learning_rate": 5.6175e-05,
      "loss": 0.8732,
      "step": 1500
    },
    {
      "epoch": 2.66,
      "eval_loss": 0.19752363860607147,
      "eval_runtime": 31.0609,
      "eval_samples_per_second": 5.731,
      "eval_steps_per_second": 1.932,
      "eval_wer": 0.16090023513604298,
      "step": 1500
    },
    {
      "epoch": 2.84,
      "learning_rate": 5.9925e-05,
      "loss": 0.8518,
      "step": 1600
    },
    {
      "epoch": 3.01,
      "learning_rate": 6.367499999999999e-05,
      "loss": 0.8373,
      "step": 1700
    },
    {
      "epoch": 3.19,
      "learning_rate": 6.7425e-05,
      "loss": 0.7958,
      "step": 1800
    },
    {
      "epoch": 3.37,
      "learning_rate": 7.1175e-05,
      "loss": 0.8058,
      "step": 1900
    },
    {
      "epoch": 3.55,
      "learning_rate": 7.492499999999999e-05,
      "loss": 0.8075,
      "step": 2000
    },
    {
      "epoch": 3.55,
      "eval_loss": 0.1483038365840912,
      "eval_runtime": 31.0609,
      "eval_samples_per_second": 5.731,
      "eval_steps_per_second": 1.932,
      "eval_wer": 0.1467920725562647,
      "step": 2000
    },
    {
      "epoch": 3.72,
      "learning_rate": 7.471946564885497e-05,
      "loss": 0.785,
      "step": 2100
    },
    {
      "epoch": 3.9,
      "learning_rate": 7.443320610687022e-05,
      "loss": 0.7656,
      "step": 2200
    },
    {
      "epoch": 4.08,
      "learning_rate": 7.414694656488549e-05,
      "loss": 0.7681,
      "step": 2300
    },
    {
      "epoch": 4.26,
      "learning_rate": 7.386068702290076e-05,
      "loss": 0.7535,
      "step": 2400
    },
    {
      "epoch": 4.43,
      "learning_rate": 7.357442748091603e-05,
      "loss": 0.7358,
      "step": 2500
    },
    {
      "epoch": 4.43,
      "eval_loss": 0.1331482231616974,
      "eval_runtime": 30.9431,
      "eval_samples_per_second": 5.752,
      "eval_steps_per_second": 1.939,
      "eval_wer": 0.14007389989922742,
      "step": 2500
    },
    {
      "epoch": 4.61,
      "learning_rate": 7.328816793893128e-05,
      "loss": 0.7478,
      "step": 2600
    },
    {
      "epoch": 4.79,
      "learning_rate": 7.300190839694656e-05,
      "loss": 0.746,
      "step": 2700
    },
    {
      "epoch": 4.96,
      "learning_rate": 7.271564885496182e-05,
      "loss": 0.7545,
      "step": 2800
    },
    {
      "epoch": 5.14,
      "learning_rate": 7.242938931297709e-05,
      "loss": 0.7268,
      "step": 2900
    },
    {
      "epoch": 5.32,
      "learning_rate": 7.214312977099236e-05,
      "loss": 0.7079,
      "step": 3000
    },
    {
      "epoch": 5.32,
      "eval_loss": 0.12731623649597168,
      "eval_runtime": 30.9985,
      "eval_samples_per_second": 5.742,
      "eval_steps_per_second": 1.936,
      "eval_wer": 0.1363789049378569,
      "step": 3000
    },
    {
      "epoch": 5.5,
      "learning_rate": 7.185687022900763e-05,
      "loss": 0.7344,
      "step": 3100
    },
    {
      "epoch": 5.67,
      "learning_rate": 7.15706106870229e-05,
      "loss": 0.7334,
      "step": 3200
    },
    {
      "epoch": 5.85,
      "learning_rate": 7.128435114503815e-05,
      "loss": 0.7274,
      "step": 3300
    },
    {
      "epoch": 6.03,
      "learning_rate": 7.099809160305343e-05,
      "loss": 0.7271,
      "step": 3400
    },
    {
      "epoch": 6.21,
      "learning_rate": 7.071469465648854e-05,
      "loss": 0.7032,
      "step": 3500
    },
    {
      "epoch": 6.21,
      "eval_loss": 0.11333052814006805,
      "eval_runtime": 31.1935,
      "eval_samples_per_second": 5.706,
      "eval_steps_per_second": 1.923,
      "eval_wer": 0.12395028552233793,
      "step": 3500
    },
    {
      "epoch": 6.38,
      "learning_rate": 7.04284351145038e-05,
      "loss": 0.6852,
      "step": 3600
    },
    {
      "epoch": 6.56,
      "learning_rate": 7.014217557251907e-05,
      "loss": 0.699,
      "step": 3700
    },
    {
      "epoch": 6.74,
      "learning_rate": 6.985591603053435e-05,
      "loss": 0.6894,
      "step": 3800
    },
    {
      "epoch": 6.91,
      "learning_rate": 6.956965648854962e-05,
      "loss": 0.7014,
      "step": 3900
    },
    {
      "epoch": 7.09,
      "learning_rate": 6.928339694656487e-05,
      "loss": 0.7129,
      "step": 4000
    },
    {
      "epoch": 7.09,
      "eval_loss": 0.11242285370826721,
      "eval_runtime": 31.0672,
      "eval_samples_per_second": 5.73,
      "eval_steps_per_second": 1.931,
      "eval_wer": 0.1289889150151159,
      "step": 4000
    },
    {
      "epoch": 7.27,
      "learning_rate": 6.899713740458015e-05,
      "loss": 0.6902,
      "step": 4100
    },
    {
      "epoch": 7.45,
      "learning_rate": 6.871087786259541e-05,
      "loss": 0.6898,
      "step": 4200
    },
    {
      "epoch": 7.62,
      "learning_rate": 6.842461832061069e-05,
      "loss": 0.6785,
      "step": 4300
    },
    {
      "epoch": 7.8,
      "learning_rate": 6.813835877862594e-05,
      "loss": 0.7024,
      "step": 4400
    },
    {
      "epoch": 7.98,
      "learning_rate": 6.785209923664122e-05,
      "loss": 0.6771,
      "step": 4500
    },
    {
      "epoch": 7.98,
      "eval_loss": 0.11212227493524551,
      "eval_runtime": 31.0691,
      "eval_samples_per_second": 5.729,
      "eval_steps_per_second": 1.931,
      "eval_wer": 0.12999664091367147,
      "step": 4500
    },
    {
      "epoch": 8.16,
      "learning_rate": 6.756583969465648e-05,
      "loss": 0.6555,
      "step": 4600
    },
    {
      "epoch": 8.33,
      "learning_rate": 6.727958015267174e-05,
      "loss": 0.6897,
      "step": 4700
    },
    {
      "epoch": 8.51,
      "learning_rate": 6.699618320610687e-05,
      "loss": 0.6809,
      "step": 4800
    },
    {
      "epoch": 8.69,
      "learning_rate": 6.670992366412213e-05,
      "loss": 0.6821,
      "step": 4900
    },
    {
      "epoch": 8.86,
      "learning_rate": 6.642366412213741e-05,
      "loss": 0.6859,
      "step": 5000
    },
    {
      "epoch": 8.86,
      "eval_loss": 0.109534852206707,
      "eval_runtime": 31.3176,
      "eval_samples_per_second": 5.684,
      "eval_steps_per_second": 1.916,
      "eval_wer": 0.13134027544507895,
      "step": 5000
    },
    {
      "epoch": 9.04,
      "learning_rate": 6.613740458015266e-05,
      "loss": 0.6849,
      "step": 5100
    },
    {
      "epoch": 9.22,
      "learning_rate": 6.585114503816793e-05,
      "loss": 0.6615,
      "step": 5200
    },
    {
      "epoch": 9.4,
      "learning_rate": 6.55648854961832e-05,
      "loss": 0.6587,
      "step": 5300
    },
    {
      "epoch": 9.57,
      "learning_rate": 6.527862595419846e-05,
      "loss": 0.6741,
      "step": 5400
    },
    {
      "epoch": 9.75,
      "learning_rate": 6.499236641221373e-05,
      "loss": 0.6496,
      "step": 5500
    },
    {
      "epoch": 9.75,
      "eval_loss": 0.1090548112988472,
      "eval_runtime": 31.0434,
      "eval_samples_per_second": 5.734,
      "eval_steps_per_second": 1.933,
      "eval_wer": 0.12495801142089352,
      "step": 5500
    },
    {
      "epoch": 9.93,
      "learning_rate": 6.4706106870229e-05,
      "loss": 0.6672,
      "step": 5600
    },
    {
      "epoch": 10.11,
      "learning_rate": 6.441984732824428e-05,
      "loss": 0.6511,
      "step": 5700
    },
    {
      "epoch": 10.28,
      "learning_rate": 6.413645038167938e-05,
      "loss": 0.6393,
      "step": 5800
    },
    {
      "epoch": 10.46,
      "learning_rate": 6.385019083969465e-05,
      "loss": 0.6531,
      "step": 5900
    },
    {
      "epoch": 10.64,
      "learning_rate": 6.356393129770992e-05,
      "loss": 0.6431,
      "step": 6000
    },
    {
      "epoch": 10.64,
      "eval_loss": 0.11021342128515244,
      "eval_runtime": 30.9854,
      "eval_samples_per_second": 5.745,
      "eval_steps_per_second": 1.936,
      "eval_wer": 0.12932482364796774,
      "step": 6000
    },
    {
      "epoch": 10.82,
      "learning_rate": 6.32776717557252e-05,
      "loss": 0.6476,
      "step": 6100
    },
    {
      "epoch": 10.99,
      "learning_rate": 6.299141221374044e-05,
      "loss": 0.6504,
      "step": 6200
    },
    {
      "epoch": 11.17,
      "learning_rate": 6.270515267175572e-05,
      "loss": 0.6366,
      "step": 6300
    },
    {
      "epoch": 11.35,
      "learning_rate": 6.241889312977098e-05,
      "loss": 0.6435,
      "step": 6400
    },
    {
      "epoch": 11.52,
      "learning_rate": 6.213263358778625e-05,
      "loss": 0.6422,
      "step": 6500
    },
    {
      "epoch": 11.52,
      "eval_loss": 0.11072035878896713,
      "eval_runtime": 31.126,
512
+ "eval_samples_per_second": 5.719,
513
+ "eval_steps_per_second": 1.928,
514
+ "eval_wer": 0.11790393013100436,
515
+ "step": 6500
516
+ },
517
+ {
518
+ "epoch": 11.7,
519
+ "learning_rate": 6.184637404580152e-05,
520
+ "loss": 0.636,
521
+ "step": 6600
522
+ },
523
+ {
524
+ "epoch": 11.88,
525
+ "learning_rate": 6.156011450381679e-05,
526
+ "loss": 0.647,
527
+ "step": 6700
528
+ },
529
+ {
530
+ "epoch": 12.06,
531
+ "learning_rate": 6.127385496183207e-05,
532
+ "loss": 0.644,
533
+ "step": 6800
534
+ },
535
+ {
536
+ "epoch": 12.23,
537
+ "learning_rate": 6.098759541984732e-05,
538
+ "loss": 0.611,
539
+ "step": 6900
540
+ },
541
+ {
542
+ "epoch": 12.41,
543
+ "learning_rate": 6.070133587786259e-05,
544
+ "loss": 0.6334,
545
+ "step": 7000
546
+ },
547
+ {
548
+ "epoch": 12.41,
549
+ "eval_loss": 0.10494451969861984,
550
+ "eval_runtime": 31.1005,
551
+ "eval_samples_per_second": 5.723,
552
+ "eval_steps_per_second": 1.929,
553
+ "eval_wer": 0.12361437688948607,
554
+ "step": 7000
555
+ },
556
+ {
557
+ "epoch": 12.59,
558
+ "learning_rate": 6.0415076335877855e-05,
559
+ "loss": 0.6305,
560
+ "step": 7100
561
+ },
562
+ {
563
+ "epoch": 12.77,
564
+ "learning_rate": 6.0128816793893126e-05,
565
+ "loss": 0.6292,
566
+ "step": 7200
567
+ },
568
+ {
569
+ "epoch": 12.94,
570
+ "learning_rate": 5.984255725190839e-05,
571
+ "loss": 0.6341,
572
+ "step": 7300
573
+ },
574
+ {
575
+ "epoch": 13.12,
576
+ "learning_rate": 5.955629770992366e-05,
577
+ "loss": 0.625,
578
+ "step": 7400
579
+ },
580
+ {
581
+ "epoch": 13.3,
582
+ "learning_rate": 5.927003816793893e-05,
583
+ "loss": 0.599,
584
+ "step": 7500
585
+ },
586
+ {
587
+ "epoch": 13.3,
588
+ "eval_loss": 0.10921534150838852,
589
+ "eval_runtime": 31.0896,
590
+ "eval_samples_per_second": 5.725,
591
+ "eval_steps_per_second": 1.93,
592
+ "eval_wer": 0.11521666106818945,
593
+ "step": 7500
594
+ },
595
+ {
596
+ "epoch": 13.47,
597
+ "learning_rate": 5.898377862595419e-05,
598
+ "loss": 0.6144,
599
+ "step": 7600
600
+ },
601
+ {
602
+ "epoch": 13.65,
603
+ "learning_rate": 5.869751908396946e-05,
604
+ "loss": 0.6314,
605
+ "step": 7700
606
+ },
607
+ {
608
+ "epoch": 13.83,
609
+ "learning_rate": 5.8411259541984726e-05,
610
+ "loss": 0.6185,
611
+ "step": 7800
612
+ },
613
+ {
614
+ "epoch": 14.01,
615
+ "learning_rate": 5.8124999999999997e-05,
616
+ "loss": 0.6122,
617
+ "step": 7900
618
+ },
619
+ {
620
+ "epoch": 14.18,
621
+ "learning_rate": 5.783874045801526e-05,
622
+ "loss": 0.6205,
623
+ "step": 8000
624
+ },
625
+ {
626
+ "epoch": 14.18,
627
+ "eval_loss": 0.1046827957034111,
628
+ "eval_runtime": 31.2229,
629
+ "eval_samples_per_second": 5.701,
630
+ "eval_steps_per_second": 1.922,
631
+ "eval_wer": 0.12193483372522673,
632
+ "step": 8000
633
+ },
634
+ {
635
+ "epoch": 14.36,
636
+ "learning_rate": 5.755248091603053e-05,
637
+ "loss": 0.612,
638
+ "step": 8100
639
+ },
640
+ {
641
+ "epoch": 14.54,
642
+ "learning_rate": 5.7266221374045794e-05,
643
+ "loss": 0.6196,
644
+ "step": 8200
645
+ },
646
+ {
647
+ "epoch": 14.72,
648
+ "learning_rate": 5.6979961832061064e-05,
649
+ "loss": 0.6114,
650
+ "step": 8300
651
+ },
652
+ {
653
+ "epoch": 14.89,
654
+ "learning_rate": 5.6693702290076334e-05,
655
+ "loss": 0.6005,
656
+ "step": 8400
657
+ },
658
+ {
659
+ "epoch": 15.07,
660
+ "learning_rate": 5.64074427480916e-05,
661
+ "loss": 0.5944,
662
+ "step": 8500
663
+ },
664
+ {
665
+ "epoch": 15.07,
666
+ "eval_loss": 0.10684490948915482,
667
+ "eval_runtime": 31.2028,
668
+ "eval_samples_per_second": 5.705,
669
+ "eval_steps_per_second": 1.923,
670
+ "eval_wer": 0.12025529056096741,
671
+ "step": 8500
672
+ },
673
+ {
674
+ "epoch": 15.25,
675
+ "learning_rate": 5.612118320610687e-05,
676
+ "loss": 0.6108,
677
+ "step": 8600
678
+ },
679
+ {
680
+ "epoch": 15.43,
681
+ "learning_rate": 5.583492366412213e-05,
682
+ "loss": 0.5995,
683
+ "step": 8700
684
+ },
685
+ {
686
+ "epoch": 15.6,
687
+ "learning_rate": 5.55486641221374e-05,
688
+ "loss": 0.6117,
689
+ "step": 8800
690
+ },
691
+ {
692
+ "epoch": 15.78,
693
+ "learning_rate": 5.5262404580152665e-05,
694
+ "loss": 0.6006,
695
+ "step": 8900
696
+ },
697
+ {
698
+ "epoch": 15.96,
699
+ "learning_rate": 5.4976145038167935e-05,
700
+ "loss": 0.6102,
701
+ "step": 9000
702
+ },
703
+ {
704
+ "epoch": 15.96,
705
+ "eval_loss": 0.10560546070337296,
706
+ "eval_runtime": 31.2837,
707
+ "eval_samples_per_second": 5.69,
708
+ "eval_steps_per_second": 1.918,
709
+ "eval_wer": 0.11588847833389318,
710
+ "step": 9000
711
+ },
712
+ {
713
+ "epoch": 16.13,
714
+ "learning_rate": 5.46898854961832e-05,
715
+ "loss": 0.5968,
716
+ "step": 9100
717
+ },
718
+ {
719
+ "epoch": 16.31,
720
+ "learning_rate": 5.440362595419847e-05,
721
+ "loss": 0.5912,
722
+ "step": 9200
723
+ },
724
+ {
725
+ "epoch": 16.49,
726
+ "learning_rate": 5.411736641221374e-05,
727
+ "loss": 0.5889,
728
+ "step": 9300
729
+ },
730
+ {
731
+ "epoch": 16.67,
732
+ "learning_rate": 5.3831106870229e-05,
733
+ "loss": 0.6014,
734
+ "step": 9400
735
+ },
736
+ {
737
+ "epoch": 16.84,
738
+ "learning_rate": 5.354484732824427e-05,
739
+ "loss": 0.5983,
740
+ "step": 9500
741
+ },
742
+ {
743
+ "epoch": 16.84,
744
+ "eval_loss": 0.1061149537563324,
745
+ "eval_runtime": 31.4987,
746
+ "eval_samples_per_second": 5.651,
747
+ "eval_steps_per_second": 1.905,
748
+ "eval_wer": 0.11521666106818945,
749
+ "step": 9500
750
+ },
751
+ {
752
+ "epoch": 17.02,
753
+ "learning_rate": 5.3258587786259536e-05,
754
+ "loss": 0.5992,
755
+ "step": 9600
756
+ },
757
+ {
758
+ "epoch": 17.2,
759
+ "learning_rate": 5.2972328244274806e-05,
760
+ "loss": 0.5887,
761
+ "step": 9700
762
+ },
763
+ {
764
+ "epoch": 17.38,
765
+ "learning_rate": 5.268606870229007e-05,
766
+ "loss": 0.6012,
767
+ "step": 9800
768
+ },
769
+ {
770
+ "epoch": 17.55,
771
+ "learning_rate": 5.239980916030534e-05,
772
+ "loss": 0.5985,
773
+ "step": 9900
774
+ },
775
+ {
776
+ "epoch": 17.73,
777
+ "learning_rate": 5.211354961832061e-05,
778
+ "loss": 0.5882,
779
+ "step": 10000
780
+ },
781
+ {
782
+ "epoch": 17.73,
783
+ "eval_loss": 0.10430345684289932,
784
+ "eval_runtime": 30.988,
785
+ "eval_samples_per_second": 5.744,
786
+ "eval_steps_per_second": 1.936,
787
+ "eval_wer": 0.11353711790393013,
788
+ "step": 10000
789
+ },
790
+ {
791
+ "epoch": 17.91,
792
+ "learning_rate": 5.182729007633587e-05,
793
+ "loss": 0.5906,
794
+ "step": 10100
795
+ },
796
+ {
797
+ "epoch": 18.09,
798
+ "learning_rate": 5.1541030534351143e-05,
799
+ "loss": 0.5843,
800
+ "step": 10200
801
+ },
802
+ {
803
+ "epoch": 18.26,
804
+ "learning_rate": 5.1257633587786254e-05,
805
+ "loss": 0.5976,
806
+ "step": 10300
807
+ },
808
+ {
809
+ "epoch": 18.44,
810
+ "learning_rate": 5.0971374045801525e-05,
811
+ "loss": 0.59,
812
+ "step": 10400
813
+ },
814
+ {
815
+ "epoch": 18.62,
816
+ "learning_rate": 5.068511450381679e-05,
817
+ "loss": 0.5876,
818
+ "step": 10500
819
+ },
820
+ {
821
+ "epoch": 18.62,
822
+ "eval_loss": 0.10231117904186249,
823
+ "eval_runtime": 31.0041,
824
+ "eval_samples_per_second": 5.741,
825
+ "eval_steps_per_second": 1.935,
826
+ "eval_wer": 0.11588847833389318,
827
+ "step": 10500
828
+ },
829
+ {
830
+ "epoch": 18.79,
831
+ "learning_rate": 5.039885496183206e-05,
832
+ "loss": 0.5845,
833
+ "step": 10600
834
+ },
835
+ {
836
+ "epoch": 18.97,
837
+ "learning_rate": 5.011259541984732e-05,
838
+ "loss": 0.5812,
839
+ "step": 10700
840
+ },
841
+ {
842
+ "epoch": 19.15,
843
+ "learning_rate": 4.982633587786259e-05,
844
+ "loss": 0.5695,
845
+ "step": 10800
846
+ },
847
+ {
848
+ "epoch": 19.33,
849
+ "learning_rate": 4.954007633587786e-05,
850
+ "loss": 0.5665,
851
+ "step": 10900
852
+ },
853
+ {
854
+ "epoch": 19.5,
855
+ "learning_rate": 4.9253816793893125e-05,
856
+ "loss": 0.5717,
857
+ "step": 11000
858
+ },
859
+ {
860
+ "epoch": 19.5,
861
+ "eval_loss": 0.10367337614297867,
862
+ "eval_runtime": 31.2366,
863
+ "eval_samples_per_second": 5.698,
864
+ "eval_steps_per_second": 1.921,
865
+ "eval_wer": 0.1232784682566342,
866
+ "step": 11000
867
+ },
868
+ {
869
+ "epoch": 19.68,
870
+ "learning_rate": 4.8967557251908396e-05,
871
+ "loss": 0.5657,
872
+ "step": 11100
873
+ },
874
+ {
875
+ "epoch": 19.86,
876
+ "learning_rate": 4.868129770992366e-05,
877
+ "loss": 0.5733,
878
+ "step": 11200
879
+ },
880
+ {
881
+ "epoch": 20.04,
882
+ "learning_rate": 4.839503816793893e-05,
883
+ "loss": 0.5816,
884
+ "step": 11300
885
+ },
886
+ {
887
+ "epoch": 20.21,
888
+ "learning_rate": 4.810877862595419e-05,
889
+ "loss": 0.5694,
890
+ "step": 11400
891
+ },
892
+ {
893
+ "epoch": 20.39,
894
+ "learning_rate": 4.782251908396946e-05,
895
+ "loss": 0.5537,
896
+ "step": 11500
897
+ },
898
+ {
899
+ "epoch": 20.39,
900
+ "eval_loss": 0.10704291611909866,
901
+ "eval_runtime": 31.0837,
902
+ "eval_samples_per_second": 5.726,
903
+ "eval_steps_per_second": 1.93,
904
+ "eval_wer": 0.11924756466241182,
905
+ "step": 11500
906
+ },
907
+ {
908
+ "epoch": 20.57,
909
+ "learning_rate": 4.7536259541984726e-05,
910
+ "loss": 0.5726,
911
+ "step": 11600
912
+ },
913
+ {
914
+ "epoch": 20.74,
915
+ "learning_rate": 4.7249999999999997e-05,
916
+ "loss": 0.5742,
917
+ "step": 11700
918
+ },
919
+ {
920
+ "epoch": 20.92,
921
+ "learning_rate": 4.696374045801527e-05,
922
+ "loss": 0.563,
923
+ "step": 11800
924
+ },
925
+ {
926
+ "epoch": 21.1,
927
+ "learning_rate": 4.667748091603053e-05,
928
+ "loss": 0.5655,
929
+ "step": 11900
930
+ },
931
+ {
932
+ "epoch": 21.28,
933
+ "learning_rate": 4.63912213740458e-05,
934
+ "loss": 0.5636,
935
+ "step": 12000
936
+ },
937
+ {
938
+ "epoch": 21.28,
939
+ "eval_loss": 0.10363561660051346,
940
+ "eval_runtime": 31.1987,
941
+ "eval_samples_per_second": 5.705,
942
+ "eval_steps_per_second": 1.923,
943
+ "eval_wer": 0.11689620423244877,
944
+ "step": 12000
945
+ },
946
+ {
947
+ "epoch": 21.45,
948
+ "learning_rate": 4.6104961832061064e-05,
949
+ "loss": 0.5612,
950
+ "step": 12100
951
+ },
952
+ {
953
+ "epoch": 21.63,
954
+ "learning_rate": 4.5818702290076334e-05,
955
+ "loss": 0.5732,
956
+ "step": 12200
957
+ },
958
+ {
959
+ "epoch": 21.81,
960
+ "learning_rate": 4.55324427480916e-05,
961
+ "loss": 0.5726,
962
+ "step": 12300
963
+ },
964
+ {
965
+ "epoch": 21.99,
966
+ "learning_rate": 4.524618320610687e-05,
967
+ "loss": 0.5547,
968
+ "step": 12400
969
+ },
970
+ {
971
+ "epoch": 22.16,
972
+ "learning_rate": 4.495992366412213e-05,
973
+ "loss": 0.5536,
974
+ "step": 12500
975
+ },
976
+ {
977
+ "epoch": 22.16,
978
+ "eval_loss": 0.10082551091909409,
979
+ "eval_runtime": 31.121,
980
+ "eval_samples_per_second": 5.72,
981
+ "eval_steps_per_second": 1.928,
982
+ "eval_wer": 0.11823983876385623,
983
+ "step": 12500
984
+ },
985
+ {
986
+ "epoch": 22.34,
987
+ "learning_rate": 4.46736641221374e-05,
988
+ "loss": 0.5509,
989
+ "step": 12600
990
+ },
991
+ {
992
+ "epoch": 22.52,
993
+ "learning_rate": 4.439026717557252e-05,
994
+ "loss": 0.5645,
995
+ "step": 12700
996
+ },
997
+ {
998
+ "epoch": 22.69,
999
+ "learning_rate": 4.410400763358778e-05,
1000
+ "loss": 0.5527,
1001
+ "step": 12800
1002
+ },
1003
+ {
1004
+ "epoch": 22.87,
1005
+ "learning_rate": 4.381774809160305e-05,
1006
+ "loss": 0.547,
1007
+ "step": 12900
1008
+ },
1009
+ {
1010
+ "epoch": 23.05,
1011
+ "learning_rate": 4.3534351145038163e-05,
1012
+ "loss": 0.5656,
1013
+ "step": 13000
1014
+ },
1015
+ {
1016
+ "epoch": 23.05,
1017
+ "eval_loss": 0.10101909935474396,
1018
+ "eval_runtime": 30.9319,
1019
+ "eval_samples_per_second": 5.755,
1020
+ "eval_steps_per_second": 1.94,
1021
+ "eval_wer": 0.11723211286530064,
1022
+ "step": 13000
1023
+ },
1024
+ {
1025
+ "epoch": 23.23,
1026
+ "learning_rate": 4.3248091603053434e-05,
1027
+ "loss": 0.5483,
1028
+ "step": 13100
1029
+ },
1030
+ {
1031
+ "epoch": 23.4,
1032
+ "learning_rate": 4.29618320610687e-05,
1033
+ "loss": 0.5501,
1034
+ "step": 13200
1035
+ },
1036
+ {
1037
+ "epoch": 23.58,
1038
+ "learning_rate": 4.267557251908397e-05,
1039
+ "loss": 0.5429,
1040
+ "step": 13300
1041
+ },
1042
+ {
1043
+ "epoch": 23.76,
1044
+ "learning_rate": 4.238931297709923e-05,
1045
+ "loss": 0.5455,
1046
+ "step": 13400
1047
+ },
1048
+ {
1049
+ "epoch": 23.94,
1050
+ "learning_rate": 4.21030534351145e-05,
1051
+ "loss": 0.5504,
1052
+ "step": 13500
1053
+ },
1054
+ {
1055
+ "epoch": 23.94,
1056
+ "eval_loss": 0.10192937403917313,
1057
+ "eval_runtime": 31.0113,
1058
+ "eval_samples_per_second": 5.74,
1059
+ "eval_steps_per_second": 1.935,
1060
+ "eval_wer": 0.11051394020826336,
1061
+ "step": 13500
1062
+ },
1063
+ {
1064
+ "epoch": 24.11,
1065
+ "learning_rate": 4.181679389312977e-05,
1066
+ "loss": 0.5601,
1067
+ "step": 13600
1068
+ },
1069
+ {
1070
+ "epoch": 24.29,
1071
+ "learning_rate": 4.1530534351145035e-05,
1072
+ "loss": 0.5419,
1073
+ "step": 13700
1074
+ },
1075
+ {
1076
+ "epoch": 24.47,
1077
+ "learning_rate": 4.1244274809160305e-05,
1078
+ "loss": 0.5389,
1079
+ "step": 13800
1080
+ },
1081
+ {
1082
+ "epoch": 24.65,
1083
+ "learning_rate": 4.095801526717557e-05,
1084
+ "loss": 0.5572,
1085
+ "step": 13900
1086
+ },
1087
+ {
1088
+ "epoch": 24.82,
1089
+ "learning_rate": 4.067175572519084e-05,
1090
+ "loss": 0.5476,
1091
+ "step": 14000
1092
+ },
1093
+ {
1094
+ "epoch": 24.82,
1095
+ "eval_loss": 0.10260963439941406,
1096
+ "eval_runtime": 31.1565,
1097
+ "eval_samples_per_second": 5.713,
1098
+ "eval_steps_per_second": 1.926,
1099
+ "eval_wer": 0.11656029559959691,
1100
+ "step": 14000
1101
+ },
1102
+ {
1103
+ "epoch": 25.0,
1104
+ "learning_rate": 4.03854961832061e-05,
1105
+ "loss": 0.5554,
1106
+ "step": 14100
1107
+ },
1108
+ {
1109
+ "epoch": 25.18,
1110
+ "learning_rate": 4.009923664122137e-05,
1111
+ "loss": 0.537,
1112
+ "step": 14200
1113
+ },
1114
+ {
1115
+ "epoch": 25.35,
1116
+ "learning_rate": 3.9812977099236635e-05,
1117
+ "loss": 0.5346,
1118
+ "step": 14300
1119
+ },
1120
+ {
1121
+ "epoch": 25.53,
1122
+ "learning_rate": 3.9526717557251906e-05,
1123
+ "loss": 0.5245,
1124
+ "step": 14400
1125
+ },
1126
+ {
1127
+ "epoch": 25.71,
1128
+ "learning_rate": 3.9240458015267176e-05,
1129
+ "loss": 0.5375,
1130
+ "step": 14500
1131
+ },
1132
+ {
1133
+ "epoch": 25.71,
1134
+ "eval_loss": 0.1107296496629715,
1135
+ "eval_runtime": 30.9057,
1136
+ "eval_samples_per_second": 5.759,
1137
+ "eval_steps_per_second": 1.941,
1138
+ "eval_wer": 0.11891165602955996,
1139
+ "step": 14500
1140
+ },
1141
+ {
1142
+ "epoch": 25.89,
1143
+ "learning_rate": 3.895419847328244e-05,
1144
+ "loss": 0.5437,
1145
+ "step": 14600
1146
+ },
1147
+ {
1148
+ "epoch": 26.06,
1149
+ "learning_rate": 3.866793893129771e-05,
1150
+ "loss": 0.5284,
1151
+ "step": 14700
1152
+ },
1153
+ {
1154
+ "epoch": 26.24,
1155
+ "learning_rate": 3.838167938931297e-05,
1156
+ "loss": 0.5276,
1157
+ "step": 14800
1158
+ },
1159
+ {
1160
+ "epoch": 26.42,
1161
+ "learning_rate": 3.809541984732824e-05,
1162
+ "loss": 0.5441,
1163
+ "step": 14900
1164
+ },
1165
+ {
1166
+ "epoch": 26.6,
1167
+ "learning_rate": 3.7809160305343507e-05,
1168
+ "loss": 0.5318,
1169
+ "step": 15000
1170
+ },
1171
+ {
1172
+ "epoch": 26.6,
1173
+ "eval_loss": 0.10514429956674576,
1174
+ "eval_runtime": 31.3716,
1175
+ "eval_samples_per_second": 5.674,
1176
+ "eval_steps_per_second": 1.913,
1177
+ "eval_wer": 0.11420893516963386,
1178
+ "step": 15000
1179
+ },
1180
+ {
1181
+ "epoch": 26.77,
1182
+ "learning_rate": 3.752290076335878e-05,
1183
+ "loss": 0.5343,
1184
+ "step": 15100
1185
+ },
1186
+ {
1187
+ "epoch": 26.95,
1188
+ "learning_rate": 3.723664122137404e-05,
1189
+ "loss": 0.5394,
1190
+ "step": 15200
1191
+ },
1192
+ {
1193
+ "epoch": 27.13,
1194
+ "learning_rate": 3.695038167938931e-05,
1195
+ "loss": 0.5352,
1196
+ "step": 15300
1197
+ },
1198
+ {
1199
+ "epoch": 27.3,
1200
+ "learning_rate": 3.6664122137404574e-05,
1201
+ "loss": 0.5398,
1202
+ "step": 15400
1203
+ },
1204
+ {
1205
+ "epoch": 27.48,
1206
+ "learning_rate": 3.6377862595419844e-05,
1207
+ "loss": 0.5278,
1208
+ "step": 15500
1209
+ },
1210
+ {
1211
+ "epoch": 27.48,
1212
+ "eval_loss": 0.10493182390928268,
1213
+ "eval_runtime": 31.3849,
1214
+ "eval_samples_per_second": 5.672,
1215
+ "eval_steps_per_second": 1.912,
1216
+ "eval_wer": 0.11656029559959691,
1217
+ "step": 15500
1218
+ },
1219
+ {
1220
+ "epoch": 27.66,
1221
+ "learning_rate": 3.609160305343511e-05,
1222
+ "loss": 0.538,
1223
+ "step": 15600
1224
+ },
1225
+ {
1226
+ "epoch": 27.84,
1227
+ "learning_rate": 3.580534351145038e-05,
1228
+ "loss": 0.5143,
1229
+ "step": 15700
1230
+ },
1231
+ {
1232
+ "epoch": 28.01,
1233
+ "learning_rate": 3.551908396946565e-05,
1234
+ "loss": 0.5229,
1235
+ "step": 15800
1236
+ },
1237
+ {
1238
+ "epoch": 28.19,
1239
+ "learning_rate": 3.523282442748091e-05,
1240
+ "loss": 0.522,
1241
+ "step": 15900
1242
+ },
1243
+ {
1244
+ "epoch": 28.37,
1245
+ "learning_rate": 3.494656488549618e-05,
1246
+ "loss": 0.5204,
1247
+ "step": 16000
1248
+ },
1249
+ {
1250
+ "epoch": 28.37,
1251
+ "eval_loss": 0.10806475579738617,
1252
+ "eval_runtime": 31.4114,
1253
+ "eval_samples_per_second": 5.667,
1254
+ "eval_steps_per_second": 1.91,
1255
+ "eval_wer": 0.11823983876385623,
1256
+ "step": 16000
1257
+ },
1258
+ {
1259
+ "epoch": 28.55,
1260
+ "learning_rate": 3.4660305343511445e-05,
1261
+ "loss": 0.5322,
1262
+ "step": 16100
1263
+ },
1264
+ {
1265
+ "epoch": 28.72,
1266
+ "learning_rate": 3.4374045801526715e-05,
1267
+ "loss": 0.5151,
1268
+ "step": 16200
1269
+ },
1270
+ {
1271
+ "epoch": 28.9,
1272
+ "learning_rate": 3.408778625954198e-05,
1273
+ "loss": 0.5393,
1274
+ "step": 16300
1275
+ },
1276
+ {
1277
+ "epoch": 29.08,
1278
+ "learning_rate": 3.380152671755725e-05,
1279
+ "loss": 0.513,
1280
+ "step": 16400
1281
+ },
1282
+ {
1283
+ "epoch": 29.26,
1284
+ "learning_rate": 3.351526717557251e-05,
1285
+ "loss": 0.512,
1286
+ "step": 16500
1287
+ },
1288
+ {
1289
+ "epoch": 29.26,
1290
+ "eval_loss": 0.10623880475759506,
1291
+ "eval_runtime": 31.0174,
1292
+ "eval_samples_per_second": 5.739,
1293
+ "eval_steps_per_second": 1.934,
1294
+ "eval_wer": 0.11555256970104132,
1295
+ "step": 16500
1296
+ },
1297
+ {
1298
+ "epoch": 29.43,
1299
+ "learning_rate": 3.322900763358778e-05,
1300
+ "loss": 0.5258,
1301
+ "step": 16600
1302
+ },
1303
+ {
1304
+ "epoch": 29.61,
1305
+ "learning_rate": 3.294274809160305e-05,
1306
+ "loss": 0.5366,
1307
+ "step": 16700
1308
+ },
1309
+ {
1310
+ "epoch": 29.79,
1311
+ "learning_rate": 3.2656488549618316e-05,
1312
+ "loss": 0.515,
1313
+ "step": 16800
1314
+ },
1315
+ {
1316
+ "epoch": 29.96,
1317
+ "learning_rate": 3.2370229007633586e-05,
1318
+ "loss": 0.5246,
1319
+ "step": 16900
1320
+ },
1321
+ {
1322
+ "epoch": 30.14,
1323
+ "learning_rate": 3.208396946564885e-05,
1324
+ "loss": 0.5082,
1325
+ "step": 17000
1326
+ },
1327
+ {
1328
+ "epoch": 30.14,
1329
+ "eval_loss": 0.10452543944120407,
1330
+ "eval_runtime": 30.9393,
1331
+ "eval_samples_per_second": 5.753,
1332
+ "eval_steps_per_second": 1.939,
1333
+ "eval_wer": 0.11353711790393013,
1334
+ "step": 17000
1335
+ },
1336
+ {
1337
+ "epoch": 30.32,
1338
+ "learning_rate": 3.179770992366412e-05,
1339
+ "loss": 0.5029,
1340
+ "step": 17100
1341
+ },
1342
+ {
1343
+ "epoch": 30.5,
1344
+ "learning_rate": 3.151431297709923e-05,
1345
+ "loss": 0.5219,
1346
+ "step": 17200
1347
+ },
1348
+ {
1349
+ "epoch": 30.67,
1350
+ "learning_rate": 3.12280534351145e-05,
1351
+ "loss": 0.5235,
1352
+ "step": 17300
1353
+ },
1354
+ {
1355
+ "epoch": 30.85,
1356
+ "learning_rate": 3.0941793893129764e-05,
1357
+ "loss": 0.5141,
1358
+ "step": 17400
1359
+ },
1360
+ {
1361
+ "epoch": 31.03,
1362
+ "learning_rate": 3.0655534351145035e-05,
1363
+ "loss": 0.5193,
1364
+ "step": 17500
1365
+ },
1366
+ {
1367
+ "epoch": 31.03,
1368
+ "eval_loss": 0.10911107808351517,
1369
+ "eval_runtime": 30.9608,
1370
+ "eval_samples_per_second": 5.749,
1371
+ "eval_steps_per_second": 1.938,
1372
+ "eval_wer": 0.11454484380248572,
1373
+ "step": 17500
1374
+ },
1375
+ {
1376
+ "epoch": 31.21,
1377
+ "learning_rate": 3.0369274809160305e-05,
1378
+ "loss": 0.5038,
1379
+ "step": 17600
1380
+ },
1381
+ {
1382
+ "epoch": 31.38,
1383
+ "learning_rate": 3.008301526717557e-05,
1384
+ "loss": 0.5086,
1385
+ "step": 17700
1386
+ },
1387
+ {
1388
+ "epoch": 31.56,
1389
+ "learning_rate": 2.979675572519084e-05,
1390
+ "loss": 0.5001,
1391
+ "step": 17800
1392
+ },
1393
+ {
1394
+ "epoch": 31.74,
1395
+ "learning_rate": 2.9510496183206105e-05,
1396
+ "loss": 0.5206,
1397
+ "step": 17900
1398
+ },
1399
+ {
1400
+ "epoch": 31.91,
1401
+ "learning_rate": 2.9224236641221372e-05,
1402
+ "loss": 0.5129,
1403
+ "step": 18000
1404
+ },
1405
+ {
1406
+ "epoch": 31.91,
1407
+ "eval_loss": 0.10398419201374054,
1408
+ "eval_runtime": 30.9924,
1409
+ "eval_samples_per_second": 5.743,
1410
+ "eval_steps_per_second": 1.936,
1411
+ "eval_wer": 0.10883439704400404,
1412
+ "step": 18000
1413
+ },
1414
+ {
1415
+ "epoch": 32.09,
1416
+ "learning_rate": 2.893797709923664e-05,
1417
+ "loss": 0.5105,
1418
+ "step": 18100
1419
+ },
1420
+ {
1421
+ "epoch": 32.27,
1422
+ "learning_rate": 2.8651717557251906e-05,
1423
+ "loss": 0.5062,
1424
+ "step": 18200
1425
+ },
1426
+ {
1427
+ "epoch": 32.45,
1428
+ "learning_rate": 2.8365458015267172e-05,
1429
+ "loss": 0.5021,
1430
+ "step": 18300
1431
+ },
1432
+ {
1433
+ "epoch": 32.62,
1434
+ "learning_rate": 2.807919847328244e-05,
1435
+ "loss": 0.5122,
1436
+ "step": 18400
1437
+ },
1438
+ {
1439
+ "epoch": 32.8,
1440
+ "learning_rate": 2.779293893129771e-05,
1441
+ "loss": 0.5126,
1442
+ "step": 18500
1443
+ },
1444
+ {
1445
+ "epoch": 32.8,
1446
+ "eval_loss": 0.10847991704940796,
1447
+ "eval_runtime": 30.8721,
1448
+ "eval_samples_per_second": 5.766,
1449
+ "eval_steps_per_second": 1.944,
1450
+ "eval_wer": 0.11689620423244877,
1451
+ "step": 18500
1452
+ },
1453
+ {
1454
+ "epoch": 32.98,
1455
+ "learning_rate": 2.7506679389312976e-05,
1456
+ "loss": 0.5179,
1457
+ "step": 18600
1458
+ },
1459
+ {
1460
+ "epoch": 33.16,
1461
+ "learning_rate": 2.7220419847328243e-05,
1462
+ "loss": 0.5049,
1463
+ "step": 18700
1464
+ },
1465
+ {
1466
+ "epoch": 33.33,
1467
+ "learning_rate": 2.693416030534351e-05,
1468
+ "loss": 0.5173,
1469
+ "step": 18800
1470
+ },
1471
+ {
1472
+ "epoch": 33.51,
1473
+ "learning_rate": 2.6647900763358777e-05,
1474
+ "loss": 0.5115,
1475
+ "step": 18900
1476
+ },
1477
+ {
1478
+ "epoch": 33.69,
1479
+ "learning_rate": 2.6361641221374043e-05,
1480
+ "loss": 0.496,
1481
+ "step": 19000
1482
+ },
1483
+ {
1484
+ "epoch": 33.69,
1485
+ "eval_loss": 0.10698471963405609,
1486
+ "eval_runtime": 31.005,
1487
+ "eval_samples_per_second": 5.741,
1488
+ "eval_steps_per_second": 1.935,
1489
+ "eval_wer": 0.11656029559959691,
1490
+ "step": 19000
1491
+ },
1492
+ {
1493
+ "epoch": 33.86,
1494
+ "learning_rate": 2.607538167938931e-05,
1495
+ "loss": 0.4938,
1496
+ "step": 19100
1497
+ },
1498
+ {
1499
+ "epoch": 34.04,
1500
+ "learning_rate": 2.5789122137404577e-05,
1501
+ "loss": 0.5128,
1502
+ "step": 19200
1503
+ },
1504
+ {
1505
+ "epoch": 34.22,
1506
+ "learning_rate": 2.550572519083969e-05,
1507
+ "loss": 0.497,
1508
+ "step": 19300
1509
+ },
1510
+ {
1511
+ "epoch": 34.4,
1512
+ "learning_rate": 2.521946564885496e-05,
1513
+ "loss": 0.4879,
1514
+ "step": 19400
1515
+ },
1516
+ {
1517
+ "epoch": 34.57,
1518
+ "learning_rate": 2.493320610687023e-05,
1519
+ "loss": 0.5017,
1520
+ "step": 19500
1521
+ },
1522
+ {
1523
+ "epoch": 34.57,
1524
+ "eval_loss": 0.11190272867679596,
1525
+ "eval_runtime": 30.8238,
1526
+ "eval_samples_per_second": 5.775,
1527
+ "eval_steps_per_second": 1.947,
1528
+ "eval_wer": 0.11622438696674504,
1529
+ "step": 19500
1530
+ },
1531
+ {
1532
+ "epoch": 34.75,
1533
+ "learning_rate": 2.4646946564885495e-05,
1534
+ "loss": 0.4994,
1535
+ "step": 19600
1536
+ },
1537
+ {
1538
+ "epoch": 34.93,
1539
+ "learning_rate": 2.436354961832061e-05,
1540
+ "loss": 0.4973,
1541
+ "step": 19700
1542
+ },
1543
+ {
1544
+ "epoch": 35.11,
1545
+ "learning_rate": 2.4077290076335876e-05,
1546
+ "loss": 0.5012,
1547
+ "step": 19800
1548
+ },
1549
+ {
1550
+ "epoch": 35.28,
1551
+ "learning_rate": 2.3791030534351143e-05,
1552
+ "loss": 0.4889,
1553
+ "step": 19900
1554
+ },
1555
+ {
1556
+ "epoch": 35.46,
1557
+ "learning_rate": 2.350477099236641e-05,
1558
+ "loss": 0.4808,
1559
+ "step": 20000
1560
+ },
1561
+ {
1562
+ "epoch": 35.46,
1563
+ "eval_loss": 0.11005562543869019,
1564
+ "eval_runtime": 31.0411,
1565
+ "eval_samples_per_second": 5.734,
1566
+ "eval_steps_per_second": 1.933,
1567
+ "eval_wer": 0.113873026536782,
1568
+ "step": 20000
1569
+ },
1570
+ {
1571
+ "epoch": 35.64,
1572
+ "learning_rate": 2.3218511450381677e-05,
1573
+ "loss": 0.4984,
1574
+ "step": 20100
1575
+ },
1576
+ {
1577
+ "epoch": 35.82,
1578
+ "learning_rate": 2.2932251908396944e-05,
1579
+ "loss": 0.4929,
1580
+ "step": 20200
1581
+ },
1582
+ {
1583
+ "epoch": 35.99,
1584
+ "learning_rate": 2.2645992366412214e-05,
1585
+ "loss": 0.4861,
1586
+ "step": 20300
1587
+ },
1588
+ {
1589
+ "epoch": 36.17,
1590
+ "learning_rate": 2.235973282442748e-05,
1591
+ "loss": 0.506,
1592
+ "step": 20400
1593
+ },
1594
+ {
1595
+ "epoch": 36.35,
1596
+ "learning_rate": 2.2073473282442747e-05,
1597
+ "loss": 0.4939,
1598
+ "step": 20500
1599
+ },
1600
+ {
1601
+ "epoch": 36.35,
1602
+ "eval_loss": 0.10813739150762558,
1603
+ "eval_runtime": 31.0724,
1604
+ "eval_samples_per_second": 5.729,
1605
+ "eval_steps_per_second": 1.931,
1606
+ "eval_wer": 0.11252939200537454,
1607
+ "step": 20500
1608
+ },
1609
+ {
1610
+ "epoch": 36.52,
1611
+ "learning_rate": 2.1787213740458014e-05,
1612
+ "loss": 0.4789,
1613
+ "step": 20600
1614
+ },
1615
+ {
1616
+ "epoch": 36.7,
1617
+ "learning_rate": 2.150095419847328e-05,
1618
+ "loss": 0.4904,
1619
+ "step": 20700
1620
+ },
1621
+ {
1622
+ "epoch": 36.88,
1623
+ "learning_rate": 2.1214694656488548e-05,
1624
+ "loss": 0.4916,
1625
+ "step": 20800
1626
+ },
1627
+ {
1628
+ "epoch": 37.06,
1629
+ "learning_rate": 2.0928435114503815e-05,
1630
+ "loss": 0.487,
1631
+ "step": 20900
1632
+ },
1633
+ {
1634
+ "epoch": 37.23,
1635
+ "learning_rate": 2.064217557251908e-05,
1636
+ "loss": 0.4738,
1637
+ "step": 21000
1638
+ },
1639
+ {
1640
+ "epoch": 37.23,
1641
+ "eval_loss": 0.10911141335964203,
1642
+ "eval_runtime": 31.0688,
1643
+ "eval_samples_per_second": 5.729,
1644
+ "eval_steps_per_second": 1.931,
1645
+ "eval_wer": 0.10984212294255963,
1646
+ "step": 21000
1647
+ },
1648
+ {
1649
+ "epoch": 37.41,
1650
+ "learning_rate": 2.0355916030534352e-05,
1651
+ "loss": 0.4757,
1652
+ "step": 21100
1653
+ },
1654
+ {
1655
+ "epoch": 37.59,
1656
+ "learning_rate": 2.006965648854962e-05,
1657
+ "loss": 0.4752,
1658
+ "step": 21200
1659
+ },
1660
+ {
1661
+ "epoch": 37.77,
1662
+ "learning_rate": 1.9783396946564885e-05,
1663
+ "loss": 0.4939,
1664
+ "step": 21300
1665
+ },
1666
+ {
1667
+ "epoch": 37.94,
1668
+ "learning_rate": 1.9497137404580152e-05,
1669
+ "loss": 0.4673,
1670
+ "step": 21400
1671
+ },
1672
+ {
1673
+ "epoch": 38.12,
1674
+ "learning_rate": 1.921087786259542e-05,
1675
+ "loss": 0.4978,
1676
+ "step": 21500
1677
+ },
1678
+ {
1679
+ "epoch": 38.12,
1680
+ "eval_loss": 0.10570317506790161,
1681
+ "eval_runtime": 31.0697,
1682
+ "eval_samples_per_second": 5.729,
1683
+ "eval_steps_per_second": 1.931,
1684
+ "eval_wer": 0.1091703056768559,
1685
+ "step": 21500
1686
+ },
1687
+ {
1688
+ "epoch": 38.3,
1689
+ "learning_rate": 1.8924618320610686e-05,
1690
+ "loss": 0.4829,
1691
+ "step": 21600
1692
+ },
1693
+ {
1694
+ "epoch": 38.47,
1695
+ "learning_rate": 1.8638358778625953e-05,
1696
+ "loss": 0.4857,
1697
+ "step": 21700
1698
+ },
1699
+ {
1700
+ "epoch": 38.65,
1701
+ "learning_rate": 1.835209923664122e-05,
1702
+ "loss": 0.4618,
1703
+ "step": 21800
1704
+ },
1705
+ {
1706
+ "epoch": 38.83,
1707
+ "learning_rate": 1.8065839694656486e-05,
1708
+ "loss": 0.4831,
1709
+ "step": 21900
1710
+ },
1711
+ {
1712
+ "epoch": 39.01,
1713
+ "learning_rate": 1.77824427480916e-05,
1714
+ "loss": 0.4972,
1715
+ "step": 22000
1716
+ },
1717
+ {
1718
+ "epoch": 39.01,
1719
+ "eval_loss": 0.10742757469415665,
1720
+ "eval_runtime": 31.1659,
1721
+ "eval_samples_per_second": 5.711,
1722
+ "eval_steps_per_second": 1.925,
1723
+ "eval_wer": 0.11051394020826336,
1724
+ "step": 22000
1725
+ },
1726
+ {
1727
+ "epoch": 39.18,
1728
+ "learning_rate": 1.7496183206106867e-05,
1729
+ "loss": 0.4974,
1730
+ "step": 22100
1731
+ },
1732
+ {
1733
+ "epoch": 39.36,
1734
+ "learning_rate": 1.7209923664122138e-05,
1735
+ "loss": 0.4778,
1736
+ "step": 22200
1737
+ },
1738
+ {
1739
+ "epoch": 39.54,
1740
+ "learning_rate": 1.6923664122137404e-05,
1741
+ "loss": 0.4863,
1742
+ "step": 22300
1743
+ },
1744
+ {
1745
+ "epoch": 39.72,
1746
+ "learning_rate": 1.663740458015267e-05,
1747
+ "loss": 0.4815,
1748
+ "step": 22400
1749
+ },
1750
+ {
1751
+ "epoch": 39.89,
1752
+ "learning_rate": 1.6351145038167938e-05,
1753
+ "loss": 0.4773,
1754
+ "step": 22500
1755
+ },
1756
+ {
1757
+ "epoch": 39.89,
1758
+ "eval_loss": 0.10622948408126831,
1759
+ "eval_runtime": 31.5999,
1760
+ "eval_samples_per_second": 5.633,
1761
+ "eval_steps_per_second": 1.899,
1762
+ "eval_wer": 0.11084984884111522,
1763
+ "step": 22500
1764
+ },
1765
+ {
1766
+ "epoch": 40.07,
1767
+ "learning_rate": 1.6064885496183205e-05,
1768
+ "loss": 0.4808,
1769
+ "step": 22600
1770
+ },
1771
+ {
1772
+ "epoch": 40.25,
1773
+ "learning_rate": 1.577862595419847e-05,
1774
+ "loss": 0.4804,
1775
+ "step": 22700
1776
+ },
1777
+ {
1778
+ "epoch": 40.43,
1779
+ "learning_rate": 1.549236641221374e-05,
1780
+ "loss": 0.4796,
1781
+ "step": 22800
1782
+ },
1783
+ {
1784
+ "epoch": 40.6,
1785
+ "learning_rate": 1.5206106870229005e-05,
1786
+ "loss": 0.4866,
1787
+ "step": 22900
1788
+ },
1789
+ {
1790
+ "epoch": 40.78,
1791
+ "learning_rate": 1.4919847328244272e-05,
1792
+ "loss": 0.4741,
1793
+ "step": 23000
1794
+ },
1795
+ {
1796
+ "epoch": 40.78,
1797
+ "eval_loss": 0.10567349940538406,
1798
+ "eval_runtime": 31.2551,
1799
+ "eval_samples_per_second": 5.695,
1800
+ "eval_steps_per_second": 1.92,
1801
+ "eval_wer": 0.10849848841115217,
1802
+ "step": 23000
1803
+ },
1804
+ {
1805
+ "epoch": 40.96,
1806
+ "learning_rate": 1.4633587786259542e-05,
1807
+ "loss": 0.4976,
1808
+ "step": 23100
1809
+ },
1810
+ {
1811
+ "epoch": 41.13,
1812
+ "learning_rate": 1.4347328244274809e-05,
1813
+ "loss": 0.4811,
1814
+ "step": 23200
1815
+ },
1816
+ {
1817
+ "epoch": 41.31,
1818
+ "learning_rate": 1.4061068702290076e-05,
1819
+ "loss": 0.4904,
1820
+ "step": 23300
1821
+ },
1822
+ {
1823
+ "epoch": 41.49,
1824
+ "learning_rate": 1.3774809160305341e-05,
1825
+ "loss": 0.4658,
1826
+ "step": 23400
1827
+ },
1828
+ {
1829
+ "epoch": 41.67,
1830
+ "learning_rate": 1.3488549618320608e-05,
1831
+ "loss": 0.4776,
1832
+ "step": 23500
1833
+ },
1834
+ {
1835
+ "epoch": 41.67,
1836
+ "eval_loss": 0.10771991312503815,
1837
+ "eval_runtime": 31.4029,
1838
+ "eval_samples_per_second": 5.668,
1839
+ "eval_steps_per_second": 1.911,
1840
+ "eval_wer": 0.10849848841115217,
1841
+ "step": 23500
1842
+ },
1843
+ {
1844
+ "epoch": 41.84,
1845
+ "learning_rate": 1.3202290076335878e-05,
1846
+ "loss": 0.4745,
1847
+ "step": 23600
1848
+ },
1849
+ {
1850
+ "epoch": 42.02,
1851
+ "learning_rate": 1.2916030534351145e-05,
1852
+ "loss": 0.4746,
1853
+ "step": 23700
1854
+ },
1855
+ {
1856
+ "epoch": 42.2,
1857
+ "learning_rate": 1.262977099236641e-05,
1858
+ "loss": 0.4858,
1859
+ "step": 23800
1860
+ },
1861
+ {
1862
+ "epoch": 42.38,
1863
+ "learning_rate": 1.2343511450381677e-05,
1864
+ "loss": 0.4687,
1865
+ "step": 23900
1866
+ },
1867
+ {
1868
+ "epoch": 42.55,
1869
+ "learning_rate": 1.2057251908396947e-05,
1870
+ "loss": 0.4637,
1871
+ "step": 24000
1872
+ },
1873
+ {
1874
+ "epoch": 42.55,
1875
+ "eval_loss": 0.10607700049877167,
1876
+ "eval_runtime": 31.045,
1877
+ "eval_samples_per_second": 5.734,
1878
+ "eval_steps_per_second": 1.933,
1879
+ "eval_wer": 0.10950621430970776,
1880
+ "step": 24000
1881
+ },
1882
+ {
1883
+ "epoch": 42.73,
1884
+ "learning_rate": 1.1770992366412214e-05,
1885
+ "loss": 0.4462,
1886
+ "step": 24100
1887
+ },
1888
+ {
1889
+ "epoch": 42.91,
1890
+ "learning_rate": 1.1484732824427479e-05,
1891
+ "loss": 0.4815,
1892
+ "step": 24200
1893
+ },
1894
+ {
1895
+ "epoch": 43.09,
1896
+ "learning_rate": 1.1198473282442746e-05,
1897
+ "loss": 0.461,
1898
+ "step": 24300
1899
+ },
1900
+ {
1901
+ "epoch": 43.26,
1902
+ "learning_rate": 1.0912213740458016e-05,
1903
+ "loss": 0.4594,
1904
+ "step": 24400
1905
+ },
1906
+ {
1907
+ "epoch": 43.44,
1908
+ "learning_rate": 1.0625954198473283e-05,
1909
+ "loss": 0.4853,
1910
+ "step": 24500
1911
+ },
1912
+ {
1913
+ "epoch": 43.44,
1914
+ "eval_loss": 0.10806521028280258,
1915
+ "eval_runtime": 31.4451,
1916
+ "eval_samples_per_second": 5.661,
1917
+ "eval_steps_per_second": 1.908,
1918
+ "eval_wer": 0.10749076251259658,
1919
+ "step": 24500
1920
+ },
1921
+ {
1922
+ "epoch": 43.62,
1923
+ "learning_rate": 1.033969465648855e-05,
1924
+ "loss": 0.4747,
1925
+ "step": 24600
1926
+ },
1927
+ {
1928
+ "epoch": 43.79,
1929
+ "learning_rate": 1.0053435114503815e-05,
1930
+ "loss": 0.4649,
1931
+ "step": 24700
1932
+ },
1933
+ {
1934
+ "epoch": 43.97,
1935
+ "learning_rate": 9.767175572519081e-06,
1936
+ "loss": 0.471,
1937
+ "step": 24800
1938
+ },
1939
+ {
1940
+ "epoch": 44.15,
1941
+ "learning_rate": 9.480916030534352e-06,
1942
+ "loss": 0.453,
1943
+ "step": 24900
1944
+ },
1945
+ {
1946
+ "epoch": 44.33,
1947
+ "learning_rate": 9.194656488549618e-06,
1948
+ "loss": 0.4602,
1949
+ "step": 25000
1950
+ },
1951
+ {
1952
+ "epoch": 44.33,
1953
+ "eval_loss": 0.1075613871216774,
1954
+ "eval_runtime": 31.217,
1955
+ "eval_samples_per_second": 5.702,
1956
+ "eval_steps_per_second": 1.922,
1957
+ "eval_wer": 0.10849848841115217,
1958
+ "step": 25000
1959
+ },
1960
+ {
1961
+ "epoch": 44.5,
1962
+ "learning_rate": 8.908396946564884e-06,
1963
+ "loss": 0.4579,
1964
+ "step": 25100
1965
+ },
1966
+ {
1967
+ "epoch": 44.68,
1968
+ "learning_rate": 8.622137404580152e-06,
1969
+ "loss": 0.4752,
1970
+ "step": 25200
1971
+ },
1972
+ {
1973
+ "epoch": 44.86,
1974
+ "learning_rate": 8.335877862595419e-06,
1975
+ "loss": 0.4708,
1976
+ "step": 25300
1977
+ },
1978
+ {
1979
+ "epoch": 45.04,
1980
+ "learning_rate": 8.049618320610687e-06,
1981
+ "loss": 0.4613,
1982
+ "step": 25400
1983
+ },
1984
+ {
1985
+ "epoch": 45.21,
1986
+ "learning_rate": 7.763358778625954e-06,
1987
+ "loss": 0.4667,
1988
+ "step": 25500
1989
+ },
1990
+ {
1991
+ "epoch": 45.21,
1992
+ "eval_loss": 0.1077902689576149,
1993
+ "eval_runtime": 31.0572,
1994
+ "eval_samples_per_second": 5.731,
1995
+ "eval_steps_per_second": 1.932,
1996
+ "eval_wer": 0.10782667114544844,
1997
+ "step": 25500
1998
+ },
1999
+ {
2000
+ "epoch": 45.39,
2001
+ "learning_rate": 7.477099236641221e-06,
2002
+ "loss": 0.4709,
2003
+ "step": 25600
2004
+ },
2005
+ {
2006
+ "epoch": 45.57,
2007
+ "learning_rate": 7.190839694656488e-06,
2008
+ "loss": 0.4523,
2009
+ "step": 25700
2010
+ },
2011
+ {
2012
+ "epoch": 45.74,
2013
+ "learning_rate": 6.9045801526717555e-06,
2014
+ "loss": 0.4537,
2015
+ "step": 25800
2016
+ },
2017
+ {
2018
+ "epoch": 45.92,
2019
+ "learning_rate": 6.62118320610687e-06,
2020
+ "loss": 0.4642,
2021
+ "step": 25900
2022
+ },
2023
+ {
2024
+ "epoch": 46.1,
2025
+ "learning_rate": 6.334923664122137e-06,
2026
+ "loss": 0.4484,
2027
+ "step": 26000
2028
+ },
2029
+ {
2030
+ "epoch": 46.1,
2031
+ "eval_loss": 0.10564640909433365,
2032
+ "eval_runtime": 31.5659,
2033
+ "eval_samples_per_second": 5.639,
2034
+ "eval_steps_per_second": 1.901,
2035
+ "eval_wer": 0.10816257977830031,
2036
+ "step": 26000
2037
+ },
2038
+ {
2039
+ "epoch": 46.28,
2040
+ "learning_rate": 6.048664122137404e-06,
2041
+ "loss": 0.4505,
2042
+ "step": 26100
2043
+ },
2044
+ {
2045
+ "epoch": 46.45,
2046
+ "learning_rate": 5.762404580152671e-06,
2047
+ "loss": 0.4758,
2048
+ "step": 26200
2049
+ },
2050
+ {
2051
+ "epoch": 46.63,
2052
+ "learning_rate": 5.476145038167939e-06,
2053
+ "loss": 0.4528,
2054
+ "step": 26300
2055
+ },
2056
+ {
2057
+ "epoch": 46.81,
2058
+ "learning_rate": 5.1898854961832056e-06,
2059
+ "loss": 0.4579,
2060
+ "step": 26400
2061
+ },
2062
+ {
2063
+ "epoch": 46.99,
2064
+ "learning_rate": 4.903625954198473e-06,
2065
+ "loss": 0.4601,
2066
+ "step": 26500
2067
+ },
2068
+ {
2069
+ "epoch": 46.99,
2070
+ "eval_loss": 0.10663167387247086,
2071
+ "eval_runtime": 31.3245,
2072
+ "eval_samples_per_second": 5.682,
2073
+ "eval_steps_per_second": 1.915,
2074
+ "eval_wer": 0.10782667114544844,
2075
+ "step": 26500
2076
+ },
2077
+ {
2078
+ "epoch": 47.16,
2079
+ "learning_rate": 4.61736641221374e-06,
2080
+ "loss": 0.4634,
2081
+ "step": 26600
2082
+ },
2083
+ {
2084
+ "epoch": 47.34,
2085
+ "learning_rate": 4.331106870229007e-06,
2086
+ "loss": 0.4603,
2087
+ "step": 26700
2088
+ },
2089
+ {
2090
+ "epoch": 47.52,
2091
+ "learning_rate": 4.0448473282442745e-06,
2092
+ "loss": 0.4653,
2093
+ "step": 26800
2094
+ },
2095
+ {
2096
+ "epoch": 47.69,
2097
+ "learning_rate": 3.7585877862595418e-06,
2098
+ "loss": 0.4619,
2099
+ "step": 26900
2100
+ },
2101
+ {
2102
+ "epoch": 47.87,
2103
+ "learning_rate": 3.472328244274809e-06,
2104
+ "loss": 0.4691,
2105
+ "step": 27000
2106
+ },
2107
+ {
2108
+ "epoch": 47.87,
2109
+ "eval_loss": 0.10679604858160019,
2110
+ "eval_runtime": 31.1673,
2111
+ "eval_samples_per_second": 5.711,
2112
+ "eval_steps_per_second": 1.925,
2113
+ "eval_wer": 0.10849848841115217,
2114
+ "step": 27000
2115
+ },
2116
+ {
2117
+ "epoch": 48.05,
2118
+ "learning_rate": 3.1860687022900762e-06,
2119
+ "loss": 0.4582,
2120
+ "step": 27100
2121
+ },
2122
+ {
2123
+ "epoch": 48.23,
2124
+ "learning_rate": 2.8998091603053435e-06,
2125
+ "loss": 0.4439,
2126
+ "step": 27200
2127
+ },
2128
+ {
2129
+ "epoch": 48.4,
2130
+ "learning_rate": 2.6135496183206107e-06,
2131
+ "loss": 0.4617,
2132
+ "step": 27300
2133
+ },
2134
+ {
2135
+ "epoch": 48.58,
2136
+ "learning_rate": 2.3272900763358775e-06,
2137
+ "loss": 0.4538,
2138
+ "step": 27400
2139
+ },
2140
+ {
2141
+ "epoch": 48.76,
2142
+ "learning_rate": 2.0410305343511447e-06,
2143
+ "loss": 0.4457,
2144
+ "step": 27500
2145
+ },
2146
+ {
2147
+ "epoch": 48.76,
2148
+ "eval_loss": 0.10663049668073654,
2149
+ "eval_runtime": 31.2143,
2150
+ "eval_samples_per_second": 5.703,
2151
+ "eval_steps_per_second": 1.922,
2152
+ "eval_wer": 0.10782667114544844,
2153
+ "step": 27500
2154
+ },
2155
+ {
2156
+ "epoch": 48.94,
2157
+ "learning_rate": 1.7547709923664122e-06,
2158
+ "loss": 0.4766,
2159
+ "step": 27600
2160
+ },
2161
+ {
2162
+ "epoch": 49.11,
2163
+ "learning_rate": 1.4685114503816792e-06,
2164
+ "loss": 0.4576,
2165
+ "step": 27700
2166
+ },
2167
+ {
2168
+ "epoch": 49.29,
2169
+ "learning_rate": 1.185114503816794e-06,
2170
+ "loss": 0.4616,
2171
+ "step": 27800
2172
+ },
2173
+ {
2174
+ "epoch": 49.47,
2175
+ "learning_rate": 8.98854961832061e-07,
2176
+ "loss": 0.4588,
2177
+ "step": 27900
2178
+ },
2179
+ {
2180
+ "epoch": 49.65,
2181
+ "learning_rate": 6.125954198473282e-07,
2182
+ "loss": 0.475,
2183
+ "step": 28000
2184
+ },
2185
+ {
2186
+ "epoch": 49.65,
2187
+ "eval_loss": 0.1059938296675682,
2188
+ "eval_runtime": 32.0006,
2189
+ "eval_samples_per_second": 5.562,
2190
+ "eval_steps_per_second": 1.875,
2191
+ "eval_wer": 0.10816257977830031,
2192
+ "step": 28000
2193
+ },
2194
+ {
2195
+ "epoch": 49.82,
2196
+ "learning_rate": 3.263358778625954e-07,
2197
+ "loss": 0.4622,
2198
+ "step": 28100
2199
+ },
2200
+ {
2201
+ "epoch": 50.0,
2202
+ "learning_rate": 4.0076335877862596e-08,
2203
+ "loss": 0.4567,
2204
+ "step": 28200
2205
+ },
2206
+ {
2207
+ "epoch": 50.0,
2208
+ "step": 28200,
2209
+ "total_flos": 6.992186583697227e+19,
2210
+ "train_loss": 0.6889870901987062,
2211
+ "train_runtime": 132713.9902,
2212
+ "train_samples_per_second": 2.551,
2213
+ "train_steps_per_second": 0.212
2214
+ }
2215
+ ],
2216
+ "max_steps": 28200,
2217
+ "num_train_epochs": 50,
2218
+ "total_flos": 6.992186583697227e+19,
2219
+ "trial_name": null,
2220
+ "trial_params": null
2221
+ }
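The best checkpoint in the log above is step 24500 (eval_wer 0.10749, i.e. roughly the 10.75% WER reported for the evaluation set). For readers who want the curve rather than the raw log, here is a minimal sketch (not part of the commit) that parses the file; it assumes the standard Hugging Face `Trainer` layout, in which the records shown above sit under a top-level `log_history` list:

```python
# Sketch: extract the evaluation curve from trainer_state.json.
# Assumes the file has been downloaded locally and follows the usual
# Trainer schema with a top-level "log_history" list of records.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Records containing "eval_wer" are the evaluation checkpoints (every 500 steps).
evals = [e for e in state["log_history"] if "eval_wer" in e]
for e in evals:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.4f}  wer={e['eval_wer']:.4f}")

best = min(evals, key=lambda e: e["eval_wer"])
print("best checkpoint:", best["step"], best["eval_wer"])
```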
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95b1c12bb3e43e845c3fafc74da0962a2f160b7c3a7e2ee8805288861ea2bbfe
+ size 3183
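The three lines above are a Git LFS pointer, not the file itself; the 3183-byte binary is the pickled `TrainingArguments` object saved by the `Trainer`. A sketch of fetching and inspecting it (the `repo_id` below is a placeholder, not a name taken from this commit):

```python
# Sketch: resolve the LFS pointer via the Hub and inspect the saved
# TrainingArguments. repo_id is hypothetical; substitute the actual repo.
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="<namespace>/<model-repo>", filename="training_args.bin")
# On recent torch (>= 2.6), pass weights_only=False to unpickle a full object.
args = torch.load(path)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```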
vocab.json ADDED
@@ -0,0 +1 @@
+ {"&": 1, "'": 2, "(": 3, ")": 4, "/": 5, "0": 6, "1": 7, "2": 8, "3": 9, "4": 10, "5": 11, "7": 12, "9": 13, "a": 14, "b": 15, "c": 16, "d": 17, "e": 18, "f": 19, "g": 20, "h": 21, "i": 22, "j": 23, "k": 24, "l": 25, "m": 26, "n": 27, "o": 28, "p": 29, "q": 30, "r": 31, "s": 32, "t": 33, "u": 34, "v": 35, "w": 36, "x": 37, "y": 38, "z": 39, "ß": 40, "à": 41, "â": 42, "ä": 43, "ç": 44, "è": 45, "é": 46, "ê": 47, "ë": 48, "î": 49, "ï": 50, "ô": 51, "ö": 52, "û": 53, "ü": 54, "|": 0, "[UNK]": 55, "[PAD]": 56}
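This is a 57-entry character vocabulary (ids 0–56), with "|" (id 0) as the word delimiter and "[UNK]"/"[PAD]" as the unknown and padding/CTC-blank tokens, as is usual for wav2vec2-style CTC models. A sketch of how such a vocabulary is typically consumed (assuming the standard wav2vec2 CTC setup; the example sentence is illustrative only):

```python
# Sketch: build a CTC tokenizer from the vocab.json above. Special-token
# names match the entries shown in the file; everything else is assumption.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)
# Each character is mapped through vocab.json; spaces become the "|" token.
print(tokenizer("moien welt").input_ids)
```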