pere committed
Commit f796d1d
1 Parent(s): e00b461

End of training

added_tokens.json CHANGED
@@ -1 +1 @@
- {"<s>": 35, "</s>": 36}
+ {"<s>": 37, "</s>": 38}
all_results.json CHANGED
@@ -1,14 +1,14 @@
  {
-     "epoch": 10.0,
-     "eval_loss": 3.0429327487945557,
-     "eval_runtime": 7.4537,
-     "eval_samples": 99,
-     "eval_samples_per_second": 13.282,
-     "eval_steps_per_second": 1.744,
-     "eval_wer": 1.0,
-     "train_loss": 4.839959698338663,
-     "train_runtime": 859.0241,
-     "train_samples": 990,
-     "train_samples_per_second": 11.525,
-     "train_steps_per_second": 0.361
+     "epoch": 5.0,
+     "eval_loss": 0.23142604529857635,
+     "eval_runtime": 3.4384,
+     "eval_samples": 100,
+     "eval_samples_per_second": 29.083,
+     "eval_steps_per_second": 1.163,
+     "eval_wer": 0.64,
+     "train_loss": 3.968093375272529,
+     "train_runtime": 1702.164,
+     "train_samples": 11030,
+     "train_samples_per_second": 32.4,
+     "train_steps_per_second": 0.253
  }
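The headline change is eval_wer falling from 1.0 (no usable transcriptions) to 0.64 after switching dataset and base model. The training script computes this with load_metric("wer") from datasets; a minimal sketch of the same metric, assuming a 1.x-era datasets and its jiwer backend are installed:

from datasets import load_metric

wer = load_metric("wer")
score = wer.compute(
    predictions=["det här är ett test"],
    references=["det här är ett prov"],
)
print(score)  # 0.2: one substituted word out of five reference words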
config.json CHANGED
@@ -1,5 +1,5 @@
  {
-   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "_name_or_path": "KBLab/wav2vec2-large-voxrex",
    "activation_dropout": 0.1,
    "adapter_kernel_size": 3,
    "adapter_stride": 2,
@@ -59,12 +59,20 @@
    "intermediate_size": 4096,
    "layer_norm_eps": 1e-05,
    "layerdrop": 0.0,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
    "mask_feature_length": 64,
    "mask_feature_min_masks": 0,
    "mask_feature_prob": 0.25,
    "mask_time_length": 10,
    "mask_time_min_masks": 2,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
    "mask_time_prob": 0.75,
+   "mask_time_selection": "static",
    "model_type": "wav2vec2",
    "num_adapter_layers": 3,
    "num_attention_heads": 16,
@@ -76,7 +84,7 @@
    "num_hidden_layers": 24,
    "num_negatives": 100,
    "output_hidden_size": 1024,
-   "pad_token_id": 34,
+   "pad_token_id": 36,
    "proj_codevector_dim": 768,
    "tdnn_dilation": [
      1,
@@ -102,6 +110,6 @@
    "torch_dtype": "float32",
    "transformers_version": "4.16.0.dev0",
    "use_weighted_layer_sum": false,
-   "vocab_size": 37,
+   "vocab_size": 39,
    "xvector_output_dim": 512
  }
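The config changes track the new base checkpoint and the larger character vocabulary: pad_token_id moves 34 → 36 and vocab_size 37 → 39, which must stay consistent with vocab.json plus added_tokens.json. A quick consistency check, sketched under the assumption that the commit's files are in the current directory and transformers is installed:

import json
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")
vocab = json.load(open("vocab.json"))
added = json.load(open("added_tokens.json"))

# The CTC output layer is sized by vocab_size; a stale pad_token_id or
# vocab_size after rebuilding the vocabulary is a classic shape-mismatch bug.
assert config.pad_token_id == vocab["[PAD]"] == 36
assert config.vocab_size == len(vocab) + len(added) == 39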
eval_results.json CHANGED
@@ -1,9 +1,9 @@
  {
-     "epoch": 10.0,
-     "eval_loss": 3.0429327487945557,
-     "eval_runtime": 7.4537,
-     "eval_samples": 99,
-     "eval_samples_per_second": 13.282,
-     "eval_steps_per_second": 1.744,
-     "eval_wer": 1.0
+     "epoch": 5.0,
+     "eval_loss": 0.23142604529857635,
+     "eval_runtime": 3.4384,
+     "eval_samples": 100,
+     "eval_samples_per_second": 29.083,
+     "eval_steps_per_second": 1.163,
+     "eval_wer": 0.64
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d0cc867c429ded34f283faefa77e1e0ab176af45af763e24e89b71718944c252
- size 1262075377
+ oid sha256:205744125a0f7786674fb1a8ba8fa2a297566d91e7a574412df0d88696bec7e9
+ size 1262083569
run.sh CHANGED
@@ -1,23 +1,28 @@
- python run_speech_recognition_ctc.py \
-     --dataset_name="NbAiLab/NPSC" \
-     --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
-     --dataset_config_name="48K_mp3" \
+ # --dataset_name="NbAiLab/NPSC" \
+ # --dataset_config_name="48K_mp3" \
+ # --text_column_name="text" \
+
+
+ python run_speech_recognition_ctc_bnb.py \
+     --dataset_name="mozilla-foundation/common_voice_7_0" \
+     --dataset_config_name="sv-SE" \
+     --text_column_name="sentence" \
+     --model_name_or_path="KBLab/wav2vec2-large-voxrex" \
      --output_dir="./" \
      --overwrite_output_dir \
      --hub_model_id="NbAiLab/xls-npsc-oh" \
-     --num_train_epochs="10" \
-     --per_device_train_batch_size="8" \
-     --per_device_eval_batch_size="8" \
+     --num_train_epochs="5" \
+     --per_device_train_batch_size="32" \
+     --per_device_eval_batch_size="32" \
      --gradient_accumulation_steps="4" \
-     --learning_rate="7.5e-5" \
-     --warmup_steps="500" \
-     --length_column_name="input_length" \
+     --learning_rate="5e-5" \
+     --warmup_steps="1000" \
      --evaluation_strategy="steps" \
-     --text_column_name="text" \
+     --length_column_name="input_length" \
      --chars_to_ignore , ? . ! \- \; \: \" " % ‘ " � — ’ … – \
-     --save_steps="500" \
-     --eval_steps="500" \
-     --logging_steps="100" \
+     --save_steps="1000" \
+     --eval_steps="1000" \
+     --logging_steps="1000" \
      --layerdrop="0.0" \
      --activation_dropout="0.1" \
      --save_total_limit="3" \
@@ -32,6 +37,5 @@ python run_speech_recognition_ctc.py \
      --fp16 \
      --group_by_length \
      --do_train --do_eval \
-     --max_train_samples="1000" \
      --max_eval_samples="100" \
      --push_to_hub
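The new command fine-tunes KBLab/wav2vec2-large-voxrex on Swedish Common Voice 7.0 and pushes the result to the NbAiLab/xls-npsc-oh repo. Once pushed, the checkpoint can be tried directly; a minimal sketch, assuming the repo is public, transformers is installed, and a local sample.wav exists:

from transformers import pipeline

# Load the fine-tuned CTC checkpoint straight from the Hub.
asr = pipeline("automatic-speech-recognition", model="NbAiLab/xls-npsc-oh")

# The pipeline decodes and resamples the file before CTC greedy decoding.
print(asr("sample.wav")["text"])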
run_speech_recognition_ctc_bnb.py ADDED
@@ -0,0 +1,783 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
+
+ import functools
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import bitsandbytes as bnb
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForCTC,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     Wav2Vec2Processor,
+     set_seed,
+ )
+ from transformers.trainer_pt_utils import get_parameter_names
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+ check_min_version("4.16.0.dev0")
+
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: str = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_split_name: str = field(
+         default="train+validation",
+         metadata={
+             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'"
+         },
+     )
+     eval_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=None,
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     eval_metrics: List[str] = list_field(
+         default=["wer"],
+         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running"
+             ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be"
+             " passed to the tokenizer for tokenization. Note that"
+             " this is only relevant if the model classifies the"
+             " input audio to a sequence of phoneme sequences."
+         },
+     )
+
+
+ @dataclass
+ class DataCollatorCTCWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor (:class:`~transformers.AutoProcessor`)
+             The processor used for processing the data.
+         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
+               maximum acceptable input length for the model if that argument is not provided.
+             * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
+               different lengths).
+         max_length (:obj:`int`, `optional`):
+             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
+         max_length_labels (:obj:`int`, `optional`):
+             Maximum length of the ``labels`` returned list and optionally padding length (see above).
+         pad_to_multiple_of (:obj:`int`, `optional`):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+     """
+
+     processor: AutoProcessor
+     padding: Union[bool, str] = "longest"
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         with self.processor.as_target_processor():
+             labels_batch = self.processor.pad(
+                 label_features,
+                 padding=self.padding,
+                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                 return_tensors="pt",
+             )
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         batch["labels"] = labels
+
+         return batch
+
+
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels create vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take union of all unique characters in each dataset
+     vocab_set = functools.reduce(
+         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # Pre-processing dataset
+     def preprocess_dataset(entry):
+         return (
+             "<INAUDIBLE>" not in entry["text"]
+             and entry["sentence_language_code"].lower() == "nb-no"
+         )
+
+     def map_dataset(entry):
+         return {"text": (entry["text"]
+                          .lower()
+                          .replace("<ee>", "eee")
+                          .replace("<mm>", "mmm")
+                          .replace("<qq>", "qqq")
+                          .replace("ó", "o")
+                          .replace("é", "e")
+                          )}
+
+     # 1. First, let's load the dataset
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.train_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+         # raw_datasets["train"] = raw_datasets["train"].filter(preprocess_dataset)
+         # raw_datasets["train"] = raw_datasets["train"].map(map_dataset)
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.text_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--text_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+         # raw_datasets["eval"] = raw_datasets["eval"].filter(preprocess_dataset)
+         # raw_datasets["eval"] = raw_datasets["eval"].map(map_dataset)
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     chars_to_ignore_regex = (
+         f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+     )
+     text_column_name = data_args.text_column_name
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
+         else:
+             batch["target_text"] = batch[text_column_name].lower() + " "
+         return batch
+
+     with training_args.main_process_first(desc="dataset map special characters removal"):
+         raw_datasets = raw_datasets.map(
+             remove_special_characters,
+             remove_columns=[text_column_name],
+             desc="remove special characters from datasets",
+         )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     # load config
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # 4. Next, if no tokenizer file is defined,
+     # we create the vocabulary of the model by extracting all unique characters from
+     # the training and evaluation datasets
+     # We need to make sure that only first rank saves vocabulary
+     # make sure all processes wait until vocab is created
+     tokenizer_name_or_path = model_args.tokenizer_name_or_path
+     tokenizer_kwargs = {}
+     if tokenizer_name_or_path is None:
+         # save vocab in training output dir
+         tokenizer_name_or_path = training_args.output_dir
+
+         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+         with training_args.main_process_first():
+             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                 os.remove(vocab_file)
+
+         with training_args.main_process_first(desc="dataset map vocabulary creation"):
+             if not os.path.isfile(vocab_file):
+                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                 vocab_dict = create_vocabulary_from_data(
+                     raw_datasets,
+                     word_delimiter_token=word_delimiter_token,
+                     unk_token=unk_token,
+                     pad_token=pad_token,
+                 )
+
+                 # save vocab dict to be loaded into tokenizer
+                 with open(vocab_file, "w") as file:
+                     json.dump(vocab_dict, file)
+
+         # if tokenizer has just been created
+         # it is defined by `tokenizer_class` if present in config else by `model_type`
+         tokenizer_kwargs = {
+             "config": config if config.tokenizer_class is not None else None,
+             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+             "unk_token": unk_token,
+             "pad_token": pad_token,
+             "word_delimiter_token": word_delimiter_token,
+         }
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(
+         tokenizer_name_or_path,
+         use_auth_token=data_args.use_auth_token,
+         **tokenizer_kwargs,
+     )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     config.update(
+         {
+             "feat_proj_dropout": model_args.feat_proj_dropout,
+             "attention_dropout": model_args.attention_dropout,
+             "hidden_dropout": model_args.hidden_dropout,
+             "final_dropout": model_args.final_dropout,
+             "mask_time_prob": model_args.mask_time_prob,
+             "mask_time_length": model_args.mask_time_length,
+             "mask_feature_prob": model_args.mask_feature_prob,
+             "mask_feature_length": model_args.mask_feature_length,
+             "gradient_checkpointing": training_args.gradient_checkpointing,
+             "layerdrop": model_args.layerdrop,
+             "ctc_loss_reduction": model_args.ctc_loss_reduction,
+             "pad_token_id": tokenizer.pad_token_id,
+             "vocab_size": len(tokenizer),
+             "activation_dropout": model_args.activation_dropout,
+         }
+     )
+
+     # create model
+     model = AutoModelForCTC.from_pretrained(
+         model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         config=config,
+         use_auth_token=data_args.use_auth_token,
+     )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["input_length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     def is_audio_in_length_range(length):
+         return length > min_input_length and length < max_input_length
+
+     # filter data that is shorter than min_input_length
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["input_length"],
+     )
+
+     # 7. Next, we can prepare the training.
+     # Let's use word error rate (WER) as our evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_metrics(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
+
+         return metrics
+
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     try:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     except (OSError, KeyError):
+         warnings.warn(
+             "Loading a processor from a feature extractor config that does not"
+             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
+             " attribute to your `preprocessor_config.json` file to suppress this warning: "
+             " `'processor_class': 'Wav2Vec2Processor'`",
+             FutureWarning,
+         )
+         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = DataCollatorCTCWithPadding(processor=processor)
+
+     decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
+     decay_parameters = [name for name in decay_parameters if "bias" not in name]
+     optimizer_grouped_parameters = [
+         {
+             "params": [p for n, p in model.named_parameters() if n in decay_parameters],
+             "weight_decay": training_args.weight_decay,
+         },
+         {
+             "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
+             "weight_decay": 0.0,
+         },
+     ]
+     optimizer = bnb.optim.Adam8bit(
+         params=optimizer_grouped_parameters,
+         betas=(training_args.adam_beta1, training_args.adam_beta2),
+         eps=training_args.adam_epsilon,
+     )
+
+     optimizers = (optimizer, None)
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         data_collator=data_collator,
+         args=training_args,
+         compute_metrics=compute_metrics,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+         optimizers=optimizers,
+     )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use last checkpoint if exist
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate()
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # Write model card and (optionally) push to hub
+     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "speech-recognition",
+         "tags": ["automatic-speech-recognition", data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+     }
+     if "common_voice" in data_args.dataset_name:
+         kwargs["language"] = config_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
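The create_vocabulary_from_data helper above is what produced the rebuilt vocab.json in this commit: unique characters first, the space remapped to the word delimiter "|", then [UNK] and [PAD] appended last (which is why [PAD] ends up as the highest base id, 36). A toy run, assuming this file is importable from the current directory and its dependencies (transformers, datasets, bitsandbytes) are installed:

from datasets import Dataset, DatasetDict
from run_speech_recognition_ctc_bnb import create_vocabulary_from_data

# Two tiny splits with a "target_text" column, as the helper expects.
toy = DatasetDict({
    "train": Dataset.from_dict({"target_text": ["hej du "]}),
    "eval": Dataset.from_dict({"target_text": ["du där "]}),
})
vocab = create_vocabulary_from_data(
    toy, word_delimiter_token="|", unk_token="[UNK]", pad_token="[PAD]"
)
print(vocab)  # characters first, then "|", with [UNK] and [PAD] at the end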
special_tokens_map.json CHANGED
@@ -1 +1 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
-     "epoch": 10.0,
-     "train_loss": 4.839959698338663,
-     "train_runtime": 859.0241,
-     "train_samples": 990,
-     "train_samples_per_second": 11.525,
-     "train_steps_per_second": 0.361
+     "epoch": 5.0,
+     "train_loss": 3.968093375272529,
+     "train_runtime": 1702.164,
+     "train_samples": 11030,
+     "train_samples_per_second": 32.4,
+     "train_steps_per_second": 0.253
  }
trainer_state.json CHANGED
@@ -1,43 +1,25 @@
  {
      "best_metric": null,
      "best_model_checkpoint": null,
-     "epoch": 10.0,
-     "global_step": 310,
+     "epoch": 4.9971014492753625,
+     "global_step": 430,
      "is_hyper_param_search": false,
      "is_local_process_zero": true,
      "is_world_process_zero": true,
      "log_history": [
          {
-             "epoch": 3.23,
-             "learning_rate": 1.4999999999999999e-05,
-             "loss": 7.9382,
-             "step": 100
-         },
-         {
-             "epoch": 6.45,
-             "learning_rate": 2.9999999999999997e-05,
-             "loss": 3.6371,
-             "step": 200
-         },
-         {
-             "epoch": 9.68,
-             "learning_rate": 4.4999999999999996e-05,
-             "loss": 3.1273,
-             "step": 300
-         },
-         {
-             "epoch": 10.0,
-             "step": 310,
-             "total_flos": 2.1035435929717018e+18,
-             "train_loss": 4.839959698338663,
-             "train_runtime": 859.0241,
-             "train_samples_per_second": 11.525,
-             "train_steps_per_second": 0.361
+             "epoch": 5.0,
+             "step": 430,
+             "total_flos": 5.144546715838387e+18,
+             "train_loss": 3.968093375272529,
+             "train_runtime": 1702.164,
+             "train_samples_per_second": 32.4,
+             "train_steps_per_second": 0.253
          }
      ],
-     "max_steps": 310,
-     "num_train_epochs": 10,
-     "total_flos": 2.1035435929717018e+18,
+     "max_steps": 430,
+     "num_train_epochs": 5,
+     "total_flos": 5.144546715838387e+18,
      "trial_name": null,
      "trial_params": null
  }
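The new step counts are consistent with run.sh: 11030 training samples at per-device batch size 32 give 345 batches per epoch, and with 4 gradient-accumulation steps that is 86 optimizer updates per epoch, hence 430 over 5 epochs (matching global_step and max_steps). A quick check, assuming single-device training as the logs suggest:

import math

samples, batch_size, grad_accum, epochs = 11030, 32, 4, 5
batches_per_epoch = math.ceil(samples / batch_size)   # 345
updates_per_epoch = batches_per_epoch // grad_accum   # 86
print(updates_per_epoch * epochs)                     # 430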
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:33cd9da062f12a7cf635924b3a28b3ee76bec843b32219b92685e71d868144d0
+ oid sha256:dbc71a16af87450f4d2115747ca06fead78d5a9411edbbc77d2195e8cb4ba464
  size 2991
vocab.json CHANGED
@@ -1 +1 @@
- {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "å": 27, "æ": 28, "è": 29, "ò": 30, "ô": 31, "ø": 32, "|": 0, "[UNK]": 33, "[PAD]": 34}
+ {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "ä": 27, "å": 28, "é": 29, "ô": 30, "ö": 31, "ü": 32, "“": 33, "”": 34, "|": 0, "[UNK]": 35, "[PAD]": 36}