lucio committed
Commit 841ba8a
Parent: 2101ada

Training in progress, step 500

.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "activation_dropout": 0.1,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 64,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.25,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.75,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 87,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 90,
+   "xvector_output_dim": 512
+ }
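
Two things stand out in this checkpointed config: the SpecAugment settings (mask_time_prob 0.75, mask_feature_prob 0.25) match the flags in run.sh below, while pad_token_id 87 and vocab_size 90 still reflect the old vocabulary that the config.json diff further down replaces with 29 and 32. A minimal sketch for sanity-checking such a config, assuming the file is available at this local path:

    from transformers import Wav2Vec2Config

    # load the raw JSON into a config object (local path assumed)
    config = Wav2Vec2Config.from_json_file(".ipynb_checkpoints/config-checkpoint.json")
    assert config.mask_time_prob == 0.75 and config.mask_feature_prob == 0.25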
.ipynb_checkpoints/eval-checkpoint.py ADDED
@@ -0,0 +1,128 @@
+ #!/usr/bin/env python3
+ import argparse
+ import re
+ from typing import Dict
+
+ from datasets import Audio, Dataset, load_dataset, load_metric
+
+ from transformers import AutoFeatureExtractor, pipeline
+
+
+ def log_results(result: Dataset, args: Dict[str, str]):
+     """DO NOT CHANGE. This function computes and logs the result metrics."""
+
+     log_outputs = args.log_outputs
+     dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
+
+     # load metrics
+     wer = load_metric("wer")
+     cer = load_metric("cer")
+
+     # compute metrics
+     wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
+     cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
+
+     # print & log results
+     result_str = f"WER: {wer_result}\n" f"CER: {cer_result}"
+     print(result_str)
+
+     with open(f"{dataset_id}_eval_results.txt", "w") as f:
+         f.write(result_str)
+
+     # log all results in a text file. Possibly interesting for analysis
+     if log_outputs is not None:
+         pred_file = f"log_{dataset_id}_predictions.txt"
+         target_file = f"log_{dataset_id}_targets.txt"
+
+         with open(pred_file, "w") as p, open(target_file, "w") as t:
+
+             # mapping function to write output
+             def write_to_file(batch, i):
+                 p.write(f"{i}" + "\n")
+                 p.write(batch["prediction"] + "\n")
+                 t.write(f"{i}" + "\n")
+                 t.write(batch["target"] + "\n")
+
+             result.map(write_to_file, with_indices=True)
+
+
+ def normalize_text(text: str) -> str:
+     """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""
+
+     chars_to_ignore_regex = '[!"%,.:;?\\_|©«¬»،؛؟‒–—’“”„…‹›−☺♂�\\\\-]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
+
+     text = re.sub(chars_to_ignore_regex, "", text.lower())
+
+     # In addition, we can normalize the target text, e.g. removing new-line characters etc.
+     # note that order is important here!
+     token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
+
+     for t in token_sequences_to_ignore:
+         text = " ".join(text.split(t))
+
+     return text
+
+
+ def main(args):
+     # load dataset
+     dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
+
+     # for testing: only process the first few examples as a test
+     # dataset = dataset.select(range(10))
+
+     # load processor
+     feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
+     sampling_rate = feature_extractor.sampling_rate
+
+     # resample audio
+     dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
+
+     # load eval pipeline
+     asr = pipeline("automatic-speech-recognition", model=args.model_id)
+
+     # map function to decode audio
+     def map_to_pred(batch):
+         prediction = asr(
+             batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
+         )
+
+         batch["prediction"] = prediction["text"]
+         batch["target"] = normalize_text(batch["sentence"])
+         return batch
+
+     # run inference on all examples
+     result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
+
+     # compute and log results
+     # do not change function below
+     log_results(result, args)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
+     )
+     parser.add_argument(
+         "--dataset",
+         type=str,
+         required=True,
+         help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
+     )
+     parser.add_argument(
+         "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
+     )
+     parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
+     parser.add_argument(
+         "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None (no chunking)."
+     )
+     parser.add_argument(
+         "--stride_length_s", type=float, default=None, help="Stride of the audio chunks, in seconds."
+     )
+     parser.add_argument(
+         "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
+     )
+     args = parser.parse_args()
+
+     main(args)
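
To illustrate what normalize_text does, a worked example with a hypothetical input (not part of the commit), assuming the function above is in scope:

    # punctuation from chars_to_ignore_regex is stripped, the text is
    # lower-cased, and the newline collapses into a single space
    print(normalize_text("Salom, Dunyo!\nQalaysiz?"))  # -> "salom dunyo qalaysiz"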
.ipynb_checkpoints/preprocessor_config-checkpoint.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
.ipynb_checkpoints/run-checkpoint.sh ADDED
@@ -0,0 +1,34 @@
+ python xls-r-uzbek-cv8/run_speech_recognition_ctc.py \
+     --dataset_name="mozilla-foundation/common_voice_8_0" \
+     --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
+     --dataset_config_name="uz" \
+     --output_dir="./xls-r-uzbek-cv8" \
+     --overwrite_output_dir \
+     --num_train_epochs="30" \
+     --per_device_train_batch_size="8" \
+     --per_device_eval_batch_size="8" \
+     --gradient_accumulation_steps="4" \
+     --learning_rate="1e-4" \
+     --warmup_steps="2000" \
+     --length_column_name="input_length" \
+     --evaluation_strategy="steps" \
+     --text_column_name="sentence" \
+     --save_steps="500" \
+     --eval_steps="500" \
+     --logging_steps="100" \
+     --layerdrop="0.0" \
+     --activation_dropout="0.1" \
+     --save_total_limit="3" \
+     --freeze_feature_encoder \
+     --feat_proj_dropout="0.0" \
+     --mask_time_prob="0.75" \
+     --mask_time_length="10" \
+     --mask_feature_prob="0.25" \
+     --mask_feature_length="64" \
+     --gradient_checkpointing \
+     --use_auth_token \
+     --fp16 \
+     --group_by_length \
+     --do_train --do_eval \
+     --push_to_hub
+ # --chars_to_ignore \  # default to all punct
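
For scale: with per_device_train_batch_size=8 and gradient_accumulation_steps=4, the effective batch size is 8 × 4 = 32 per device; evaluation and checkpointing run every 500 optimizer steps, and the 1e-4 learning rate warms up over the first 2000 steps.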
.ipynb_checkpoints/run_speech_recognition_ctc-checkpoint.py ADDED
@@ -0,0 +1,755 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
+
+ import functools
+ import json
+ import logging
+ import os
+ import re
+ import string
+ import sys
+ import unidecode
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForCTC,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     Wav2Vec2Processor,
+     set_seed,
+ )
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+ check_min_version("4.16.0.dev0")
+
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the feature axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: str = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_split_name: str = field(
+         default="train",
+         metadata={
+             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
+         },
+     )
+     eval_split_name: str = field(
+         default="validation",
+         metadata={
+             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=None,
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     eval_metrics: List[str] = list_field(
+         default=["wer"],
+         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Filter out audio files that are longer than `max_duration_in_seconds` seconds"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter out audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running"
+             " :obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be passed to"
+             " the tokenizer for tokenization. Note that"
+             " this is only relevant if the model classifies the"
+             " input audio to a sequence of phoneme sequences."
+         },
+     )
+
+
+ @dataclass
+ class DataCollatorCTCWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor (:class:`~transformers.AutoProcessor`)
+             The processor used for processing the data.
+         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
+               maximum acceptable input length for the model if that argument is not provided.
+             * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
+               different lengths).
+         max_length (:obj:`int`, `optional`):
+             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
+         max_length_labels (:obj:`int`, `optional`):
+             Maximum length of the ``labels`` returned list and optionally padding length (see above).
+         pad_to_multiple_of (:obj:`int`, `optional`):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+     """
+
+     processor: AutoProcessor
+     padding: Union[bool, str] = "longest"
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         with self.processor.as_target_processor():
+             labels_batch = self.processor.pad(
+                 label_features,
+                 padding=self.padding,
+                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                 return_tensors="pt",
+             )
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         batch["labels"] = labels
+
+         return batch
+
+
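A minimal sketch (not part of the commit) of what the collator does at batch time, assuming a trained processor is already available under ./xls-r-uzbek-cv8 (path assumed; toy features invented for illustration):

    from transformers import Wav2Vec2Processor

    processor = Wav2Vec2Processor.from_pretrained("./xls-r-uzbek-cv8")
    collator = DataCollatorCTCWithPadding(processor=processor)
    features = [
        {"input_values": [0.1] * 16000, "labels": [19, 1, 12]},  # ~1 s of audio
        {"input_values": [0.2] * 8000, "labels": [19, 1]},       # ~0.5 s of audio
    ]
    batch = collator(features)
    # batch["input_values"] has shape (2, 16000): the short clip is zero-padded,
    # and batch["labels"] pads with -100 so the CTC loss ignores those positions
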
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels, create the vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take the union of all unique characters in each dataset
+     vocab_set = functools.reduce(
+         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with the delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
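A toy run of create_vocabulary_from_data (invented data) shows the resulting index layout: sorted characters first, white space remapped to the delimiter, then [UNK] and [PAD] appended at the end:

    from datasets import Dataset, DatasetDict

    toy = DatasetDict({
        "train": Dataset.from_dict({"target_text": ["salom dunyo "]}),
        "eval": Dataset.from_dict({"target_text": ["salom "]}),
    })
    vocab = create_vocabulary_from_data(toy, word_delimiter_token="|", unk_token="[UNK]", pad_token="[PAD]")
    # -> {"|": 0, "a": 1, "d": 2, "l": 3, "m": 4, "n": 5, "o": 6, "s": 7,
    #     "u": 8, "y": 9, "[UNK]": 10, "[PAD]": 11}

The same mechanics produce the 30-entry vocab.json replaced by this commit, where [PAD] lands at index 29, hence the pad_token_id update in config.json below.
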
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # 1. First, let's load the dataset
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.train_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.text_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--text_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     if data_args.chars_to_ignore is None:
+         chars_to_ignore_regex = f"[{re.escape(string.punctuation)}]"
+     else:
+         chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'
+     print("chars_to_ignore", chars_to_ignore_regex)
+     text_column_name = data_args.text_column_name
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(
+                 chars_to_ignore_regex,
+                 "",
+                 re.sub(r"([og])['`´]", r"\g<1>‘", unidecode.unidecode(batch[text_column_name]).lower()),
+             ) + " "
+         else:
+             batch["target_text"] = batch[text_column_name].lower() + " "
+         return batch
+
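The unidecode plus re.sub pass deserves a quick illustration: Uzbek Latin writes o‘ and g‘ with a modifier letter that speakers type as a backtick, straight apostrophe, or acute accent, and this normalization folds all of them into the single ‘ character that ends up in the vocabulary (the "\u2018" entry, index 27, in the new vocab.json). A worked example with hypothetical input:

    import re
    import unidecode

    raw = "G`alaba ko'chasi"
    text = unidecode.unidecode(raw).lower()         # -> "g`alaba ko'chasi"
    text = re.sub(r"([og])['`´]", r"\g<1>‘", text)  # -> "g‘alaba ko‘chasi"
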
+     with training_args.main_process_first(desc="dataset map special characters removal"):
+         raw_datasets = raw_datasets.map(
+             remove_special_characters,
+             remove_columns=[text_column_name],
+             desc="remove special characters from datasets",
+         )
+
+     num_workers = data_args.preprocessing_num_workers
+
+     def is_transcript_in_length_range(text):
+         return 3 < len(text) < 200
+
+     raw_datasets = raw_datasets.filter(
+         is_transcript_in_length_range,
+         num_proc=num_workers,
+         input_columns=["target_text"],
+     )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     # load config
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # 4. Next, if no tokenizer file is defined,
+     # we create the vocabulary of the model by extracting all unique characters from
+     # the training and evaluation datasets
+     # We need to make sure that only the first rank saves the vocabulary
+     # make sure all processes wait until the vocab is created
+     tokenizer_name_or_path = model_args.tokenizer_name_or_path
+     tokenizer_kwargs = {}
+     if tokenizer_name_or_path is None:
+         # save vocab in training output dir
+         tokenizer_name_or_path = training_args.output_dir
+
+         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+         with training_args.main_process_first():
+             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                 os.remove(vocab_file)
+
+         with training_args.main_process_first(desc="dataset map vocabulary creation"):
+             if not os.path.isfile(vocab_file):
+                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                 vocab_dict = create_vocabulary_from_data(
+                     raw_datasets,
+                     word_delimiter_token=word_delimiter_token,
+                     unk_token=unk_token,
+                     pad_token=pad_token,
+                 )
+
+                 # save vocab dict to be loaded into tokenizer
+                 with open(vocab_file, "w") as file:
+                     json.dump(vocab_dict, file)
+
+         # if the tokenizer has just been created
+         # it is defined by `tokenizer_class` if present in config else by `model_type`
+         tokenizer_kwargs = {
+             "config": config if config.tokenizer_class is not None else None,
+             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+             "unk_token": unk_token,
+             "pad_token": pad_token,
+             "word_delimiter_token": word_delimiter_token,
+         }
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(
+         tokenizer_name_or_path,
+         use_auth_token=data_args.use_auth_token,
+         **tokenizer_kwargs,
+     )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     config.update(
+         {
+             "feat_proj_dropout": model_args.feat_proj_dropout,
+             "attention_dropout": model_args.attention_dropout,
+             "hidden_dropout": model_args.hidden_dropout,
+             "final_dropout": model_args.final_dropout,
+             "mask_time_prob": model_args.mask_time_prob,
+             "mask_time_length": model_args.mask_time_length,
+             "mask_feature_prob": model_args.mask_feature_prob,
+             "mask_feature_length": model_args.mask_feature_length,
+             "gradient_checkpointing": training_args.gradient_checkpointing,
+             "layerdrop": model_args.layerdrop,
+             "ctc_loss_reduction": model_args.ctc_loss_reduction,
+             "pad_token_id": tokenizer.pad_token_id,
+             "vocab_size": len(tokenizer),
+             "activation_dropout": model_args.activation_dropout,
+         }
+     )
+
+     # create model
+     model = AutoModelForCTC.from_pretrained(
+         model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         config=config,
+         use_auth_token=data_args.use_auth_token,
+     )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["input_length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     def is_audio_in_length_range(length):
+         return length > min_input_length and length < max_input_length
+
+     # filter data that is shorter than min_input_length
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["input_length"],
+     )
+
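To make the duration filter concrete, with the 16 kHz feature extractor and the run.sh defaults (illustrative arithmetic only):

    sampling_rate = 16_000
    max_input_length = 20.0 * sampling_rate  # 320_000 samples: clips over 20 s are dropped
    min_input_length = 0.0 * sampling_rate   # 0: no lower bound in this run
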
+     # 7. Next, we can prepare the training.
+     # Let's use word error rate (WER) as our evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_metrics(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
+
+         return metrics
+
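The group_tokens=False on the label side matters because CTC-style decoding merges repeated ids, which is correct for the argmax predictions but would corrupt references containing genuine double letters. An illustration, assuming the tokenizer built from this commit's vocab.json (s=19, a=1, l=12, o=15, m=13):

    tokenizer.decode([19, 19, 1, 12, 15, 13])                      # -> "salom" (repeats merged)
    tokenizer.decode([19, 19, 1, 12, 15, 13], group_tokens=False)  # -> "ssalom" (kept verbatim)
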
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     try:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     except (OSError, KeyError):
+         warnings.warn(
+             "Loading a processor from a feature extractor config that does not"
+             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
+             " attribute to your `preprocessor_config.json` file to suppress this warning: "
+             " `'processor_class': 'Wav2Vec2Processor'`",
+             FutureWarning,
+         )
+         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = DataCollatorCTCWithPadding(processor=processor)
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         data_collator=data_collator,
+         args=training_args,
+         compute_metrics=compute_metrics,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+     )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use the last checkpoint if one exists
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate()
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # Write model card and (optionally) push to hub
+     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "speech-recognition",
+         "tags": ["automatic-speech-recognition", data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+     }
+     if "common_voice" in data_args.dataset_name:
+         kwargs["language"] = config_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
.ipynb_checkpoints/special_tokens_map-checkpoint.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
.ipynb_checkpoints/tokenizer_config-checkpoint.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./xls-r-uzbek-cv8", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
.ipynb_checkpoints/vocab-checkpoint.json CHANGED
@@ -1 +1 @@
- {"\\": 1, "_": 2, "`": 3, "a": 4, "b": 5, "c": 6, "d": 7, "e": 8, "f": 9, "g": 10, "h": 11, "i": 12, "j": 13, "k": 14, "l": 15, "m": 16, "n": 17, "o": 18, "p": 19, "q": 20, "r": 21, "s": 22, "t": 23, "u": 24, "v": 25, "w": 26, "x": 27, "y": 28, "z": 29, "|": 0, "\u00a9": 31, "\u00ab": 32, "\u00ac": 33, "\u00b4": 34, "\u00b5": 35, "\u00bb": 36, "\u00eb": 37, "\u00f2": 38, "\u00f3": 39, "\u00f5": 40, "\u00fc": 41, "\u0123": 42, "\u0131": 43, "\u015f": 44, "\u01a3": 45, "\u01b6": 46, "\u0299": 47, "\u02bb": 48, "\u02bc": 49, "\u0430": 50, "\u0431": 51, "\u0432": 52, "\u0433": 53, "\u0434": 54, "\u0435": 55, "\u0436": 56, "\u0437": 57, "\u0438": 58, "\u0439": 59, "\u043a": 60, "\u043b": 61, "\u043c": 62, "\u043d": 63, "\u043e": 64, "\u043f": 65, "\u0440": 66, "\u0441": 67, "\u0442": 68, "\u0443": 69, "\u0445": 70, "\u0447": 71, "\u0448": 72, "\u044a": 73, "\u044b": 74, "\u044c": 75, "\u044d": 76, "\u044e": 77, "\u0451": 78, "\u0458": 79, "\u045e": 80, "\u0493": 81, "\u049b": 82, "\u04b3": 83, "\u04ef": 84, "\u05b9": 85, "\u2012": 86, "\u201e": 87, "\u2022": 88, "\u2212": 89, "\u263a": 90, "\u2642": 91, "\u2705": 92, "\u2714": 93, "\ufe0f": 94, "[UNK]": 94, "[PAD]": 95}
+ {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "\u2018": 27, "|": 0, "[UNK]": 28, "[PAD]": 29}
added_tokens.json CHANGED
@@ -1 +1 @@
- {"<s>": 88, "</s>": 89}
+ {"<s>": 30, "</s>": 31}
config.json CHANGED
@@ -76,7 +76,7 @@
   "num_hidden_layers": 24,
   "num_negatives": 100,
   "output_hidden_size": 1024,
-  "pad_token_id": 87,
+  "pad_token_id": 29,
   "proj_codevector_dim": 768,
   "tdnn_dilation": [
     1,
@@ -102,6 +102,6 @@
   "torch_dtype": "float32",
   "transformers_version": "4.16.0.dev0",
   "use_weighted_layer_sum": false,
-  "vocab_size": 90,
+  "vocab_size": 32,
   "xvector_output_dim": 512
 }
eval.py ADDED
@@ -0,0 +1,128 @@
(identical to .ipynb_checkpoints/eval-checkpoint.py above)
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:357f42d66419b6bb188ca1fd716449a27497865d9daee77d522f98cd54485946
- size 1262292657
+ oid sha256:2fc40b98cd7c9c2100be9d7ef4a4bcf1850052163f3cabb9cecf6d28b12b2b13
+ size 1262054897
run.sh ADDED
@@ -0,0 +1,34 @@
(identical to .ipynb_checkpoints/run-checkpoint.sh above)
run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,755 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+
16
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
17
+
18
+ import functools
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import string
24
+ import sys
25
+ import unidecode
26
+ import warnings
27
+ from dataclasses import dataclass, field
28
+ from typing import Dict, List, Optional, Union
29
+
30
+ import datasets
31
+ import numpy as np
32
+ import torch
33
+ from datasets import DatasetDict, load_dataset, load_metric
34
+
35
+ import transformers
36
+ from transformers import (
37
+ AutoConfig,
38
+ AutoFeatureExtractor,
39
+ AutoModelForCTC,
40
+ AutoProcessor,
41
+ AutoTokenizer,
42
+ HfArgumentParser,
43
+ Trainer,
44
+ TrainingArguments,
45
+ Wav2Vec2Processor,
46
+ set_seed,
47
+ )
48
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
49
+ from transformers.utils import check_min_version
50
+ from transformers.utils.versions import require_version
51
+
52
+
53
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
54
+ check_min_version("4.16.0.dev0")
55
+
56
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
57
+
58
+
59
+ logger = logging.getLogger(__name__)
60
+
61
+
62
+ def list_field(default=None, metadata=None):
63
+ return field(default_factory=lambda: default, metadata=metadata)
64
+
65
+
66
+ @dataclass
67
+ class ModelArguments:
68
+ """
69
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
70
+ """
71
+
72
+ model_name_or_path: str = field(
73
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
74
+ )
75
+ tokenizer_name_or_path: Optional[str] = field(
76
+ default=None,
77
+ metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
78
+ )
79
+ cache_dir: Optional[str] = field(
80
+ default=None,
81
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
82
+ )
83
+ freeze_feature_encoder: bool = field(
84
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
85
+ )
86
+ attention_dropout: float = field(
87
+ default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
88
+ )
89
+ activation_dropout: float = field(
90
+ default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
91
+ )
92
+ feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
93
+ hidden_dropout: float = field(
94
+ default=0.0,
95
+ metadata={
96
+ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
97
+ },
98
+ )
99
+ final_dropout: float = field(
100
+ default=0.0,
101
+ metadata={"help": "The dropout probability for the final projection layer."},
102
+ )
103
+ mask_time_prob: float = field(
104
+ default=0.05,
105
+ metadata={
106
+ "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
107
+ "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
108
+ "vectors will be masked along the time axis."
109
+ },
110
+ )
111
+ mask_time_length: int = field(
112
+ default=10,
113
+ metadata={"help": "Length of vector span to mask along the time axis."},
114
+ )
115
+ mask_feature_prob: float = field(
116
+ default=0.0,
117
+ metadata={
118
+ "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
119
+ "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
120
+ },
121
+ )
122
+ mask_feature_length: int = field(
123
+ default=10,
124
+ metadata={"help": "Length of vector span to mask along the feature axis."},
125
+ )
126
+ layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
127
+ ctc_loss_reduction: Optional[str] = field(
128
+ default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
129
+ )
130
+
131
+
132
+ @dataclass
133
+ class DataTrainingArguments:
134
+ """
135
+ Arguments pertaining to what data we are going to input our model for training and eval.
136
+
137
+ Using `HfArgumentParser` we can turn this class
138
+ into argparse arguments to be able to specify them on
139
+ the command line.
140
+ """
141
+
142
+ dataset_name: str = field(
143
+ metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
144
+ )
145
+ dataset_config_name: Optional[str] = field(
146
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
147
+ )
148
+ train_split_name: str = field(
149
+ default="train",
150
+ metadata={
151
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
152
+ },
153
+ )
154
+ eval_split_name: str = field(
155
+ default="validation",
156
+ metadata={
157
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
158
+ },
159
+ )
160
+ audio_column_name: str = field(
161
+ default="audio",
162
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
163
+ )
164
+ text_column_name: str = field(
165
+ default="text",
166
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
167
+ )
168
+ overwrite_cache: bool = field(
169
+ default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
170
+ )
171
+ preprocessing_num_workers: Optional[int] = field(
172
+ default=None,
173
+ metadata={"help": "The number of processes to use for the preprocessing."},
174
+ )
175
+ max_train_samples: Optional[int] = field(
176
+ default=None,
177
+ metadata={
178
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
179
+ "value if set."
180
+ },
181
+ )
182
+ max_eval_samples: Optional[int] = field(
183
+ default=None,
184
+ metadata={
185
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
186
+ "value if set."
187
+ },
188
+ )
189
+ chars_to_ignore: Optional[List[str]] = list_field(
190
+ default=None,
191
+ metadata={"help": "A list of characters to remove from the transcripts."},
192
+ )
193
+ eval_metrics: List[str] = list_field(
194
+ default=["wer"],
195
+ metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
196
+ )
197
+ max_duration_in_seconds: float = field(
198
+ default=20.0,
199
+ metadata={
200
+ "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
201
+ },
202
+ )
203
+ min_duration_in_seconds: float = field(
204
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
205
+ )
206
+ preprocessing_only: bool = field(
207
+ default=False,
208
+ metadata={
209
+ "help": "Whether to only do data preprocessing and skip training. "
210
+ "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
211
+ "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
212
+ "so that the cached datasets can consequently be loaded in distributed training"
213
+ },
214
+ )
215
+ use_auth_token: bool = field(
216
+ default=False,
217
+ metadata={
218
+ "help": "If :obj:`True`, will use the token generated when running"
219
+ ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
220
+ },
221
+ )
222
+ unk_token: str = field(
223
+ default="[UNK]",
224
+ metadata={"help": "The unk token for the tokenizer"},
225
+ )
226
+ pad_token: str = field(
227
+ default="[PAD]",
228
+ metadata={"help": "The padding token for the tokenizer"},
229
+ )
230
+ word_delimiter_token: str = field(
231
+ default="|",
232
+ metadata={"help": "The word delimiter token for the tokenizer"},
233
+ )
234
+ phoneme_language: Optional[str] = field(
235
+ default=None,
236
+ metadata={
237
+ "help": "The target language that should be used be"
238
+ " passed to the tokenizer for tokenization. Note that"
239
+ " this is only relevant if the model classifies the"
240
+ " input audio to a sequence of phoneme sequences."
241
+ },
242
+ )
243
+
244
+
245
+ @dataclass
246
+ class DataCollatorCTCWithPadding:
247
+ """
248
+ Data collator that will dynamically pad the inputs received.
249
+ Args:
250
+ processor (:class:`~transformers.AutoProcessor`)
251
+ The processor used for processing the data.
252
+ padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
253
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
254
+ among:
255
+ * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
256
+ sequence is provided).
257
+ * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
258
+ maximum acceptable input length for the model if that argument is not provided.
259
+ * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
260
+ different lengths).
261
+ max_length (:obj:`int`, `optional`):
262
+ Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
263
+ max_length_labels (:obj:`int`, `optional`):
264
+ Maximum length of the ``labels`` returned list and optionally padding length (see above).
265
+ pad_to_multiple_of (:obj:`int`, `optional`):
266
+ If set, will pad the sequence to a multiple of the provided value.
267
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
268
+ 7.5 (Volta).
269
+ """
270
+
271
+ processor: AutoProcessor
272
+ padding: Union[bool, str] = "longest"
273
+ pad_to_multiple_of: Optional[int] = None
274
+ pad_to_multiple_of_labels: Optional[int] = None
275
+
276
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
277
+ # split inputs and labels since they have to be of different lengths and need
278
+ # different padding methods
279
+ input_features = [{"input_values": feature["input_values"]} for feature in features]
280
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
281
+
282
+ batch = self.processor.pad(
283
+ input_features,
284
+ padding=self.padding,
285
+ pad_to_multiple_of=self.pad_to_multiple_of,
286
+ return_tensors="pt",
287
+ )
288
+
289
+ with self.processor.as_target_processor():
290
+ labels_batch = self.processor.pad(
291
+ label_features,
292
+ padding=self.padding,
293
+ pad_to_multiple_of=self.pad_to_multiple_of_labels,
294
+ return_tensors="pt",
295
+ )
296
+
297
+ # replace padding with -100 to ignore loss correctly
298
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
299
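+ # (-100 is the label value that Wav2Vec2ForCTC excludes from the CTC loss computation)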
+
300
+ batch["labels"] = labels
301
+
302
+ return batch
303
+
304
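+ # Usage sketch (illustrative only, assuming a processor has already been loaded):
+ # collator = DataCollatorCTCWithPadding(processor=processor, padding="longest")
+ # batch = collator([{"input_values": [...], "labels": [...]}, ...])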
+
305
+ def create_vocabulary_from_data(
306
+ datasets: DatasetDict,
307
+ word_delimiter_token: Optional[str] = None,
308
+ unk_token: Optional[str] = None,
309
+ pad_token: Optional[str] = None,
310
+ ):
311
+ # Given training and test labels, create the vocabulary
312
+ def extract_all_chars(batch):
313
+ all_text = " ".join(batch["target_text"])
314
+ vocab = list(set(all_text))
315
+ return {"vocab": [vocab], "all_text": [all_text]}
316
+
317
+ vocabs = datasets.map(
318
+ extract_all_chars,
319
+ batched=True,
320
+ batch_size=-1,
321
+ keep_in_memory=True,
322
+ remove_columns=datasets["train"].column_names,
323
+ )
324
+
325
+ # take union of all unique characters in each dataset
326
+ vocab_set = functools.reduce(
327
+ lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
328
+ )
329
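+ # e.g. train chars {"a", "b"} and eval chars {"b", "c"} reduce to {"a", "b", "c"}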
+
330
+ vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
331
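+ # sorting first makes the character-to-id mapping deterministic across runs, e.g. {"a": 0, "b": 1, ...}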
+
332
+ # replace white space with delimiter token
333
+ if word_delimiter_token is not None:
334
+ vocab_dict[word_delimiter_token] = vocab_dict[" "]
335
+ del vocab_dict[" "]
336
+
337
+ # add unk and pad token
338
+ if unk_token is not None:
339
+ vocab_dict[unk_token] = len(vocab_dict)
340
+
341
+ if pad_token is not None:
342
+ vocab_dict[pad_token] = len(vocab_dict)
343
+
344
+ return vocab_dict
345
+
346
+
347
+ def main():
348
+ # See all possible arguments in src/transformers/training_args.py
349
+ # or by passing the --help flag to this script.
350
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
351
+
352
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
353
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
354
+ # If we pass only one argument to the script and it's the path to a json file,
355
+ # let's parse it to get our arguments.
356
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
357
+ else:
358
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
359
+
360
+ # Detecting last checkpoint.
361
+ last_checkpoint = None
362
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
363
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
364
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
365
+ raise ValueError(
366
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
367
+ "Use --overwrite_output_dir to overcome."
368
+ )
369
+ elif last_checkpoint is not None:
370
+ logger.info(
371
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
372
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
373
+ )
374
+
375
+ # Setup logging
376
+ logging.basicConfig(
377
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
378
+ datefmt="%m/%d/%Y %H:%M:%S",
379
+ handlers=[logging.StreamHandler(sys.stdout)],
380
+ )
381
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
382
+
383
+ # Log on each process the small summary:
384
+ logger.warning(
385
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
386
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
387
+ )
388
+ # Set the verbosity to info of the Transformers logger (on main process only):
389
+ if is_main_process(training_args.local_rank):
390
+ transformers.utils.logging.set_verbosity_info()
391
+ logger.info("Training/evaluation parameters %s", training_args)
392
+
393
+ # Set seed before initializing model.
394
+ set_seed(training_args.seed)
395
+
396
+ # 1. First, let's load the dataset
397
+ raw_datasets = DatasetDict()
398
+
399
+ if training_args.do_train:
400
+ raw_datasets["train"] = load_dataset(
401
+ data_args.dataset_name,
402
+ data_args.dataset_config_name,
403
+ split=data_args.train_split_name,
404
+ use_auth_token=data_args.use_auth_token,
405
+ )
406
+
407
+ if data_args.audio_column_name not in raw_datasets["train"].column_names:
408
+ raise ValueError(
409
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
410
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
411
+ f"{', '.join(raw_datasets['train'].column_names)}."
412
+ )
413
+
414
+ if data_args.text_column_name not in raw_datasets["train"].column_names:
415
+ raise ValueError(
416
+ f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
417
+ "Make sure to set `--text_column_name` to the correct text column - one of "
418
+ f"{', '.join(raw_datasets['train'].column_names)}."
419
+ )
420
+
421
+ if data_args.max_train_samples is not None:
422
+ raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
423
+
424
+ if training_args.do_eval:
425
+ raw_datasets["eval"] = load_dataset(
426
+ data_args.dataset_name,
427
+ data_args.dataset_config_name,
428
+ split=data_args.eval_split_name,
429
+ use_auth_token=data_args.use_auth_token,
430
+ )
431
+
432
+ if data_args.max_eval_samples is not None:
433
+ raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
434
+
435
+ # 2. We remove some special characters from the datasets
436
+ # that make training complicated and do not help in transcribing the speech
437
+ # E.g. characters such as `,` and `.` do not really have an acoustic characteristic
438
+ # that could be easily picked up by the model
439
+ if data_args.chars_to_ignore is None:
440
+ chars_to_ignore_regex = f'[{re.escape(string.punctuation)}]'
441
+ else:
442
+ chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'
443
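+ # e.g. chars_to_ignore = [",", ".", "?"] builds the character class [,.?]; user-supplied characters are not re.escape()d here, so regex metacharacters must already be escaped by the caller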
+ print("chars_to_ignore", chars_to_ignore_regex)
444
+ text_column_name = data_args.text_column_name
445
+
446
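+ # normalization: unidecode transliterates to ASCII, the text is lowercased, apostrophe-like marks after "o"/"g" are rewritten to the single canonical ‘, then the ignored characters are stripped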
+ def remove_special_characters(batch):
447
+ if chars_to_ignore_regex is not None:
448
+ batch["target_text"] = re.sub(
449
+ chars_to_ignore_regex,
450
+ "",
451
+ re.sub("([og])['`´]", "\g<1>‘", unidecode.unidecode(batch[text_column_name]).lower())
452
+ ) + " "
453
+ else:
454
+ batch["target_text"] = batch[text_column_name].lower() + " "
455
+ return batch
456
+
457
+ with training_args.main_process_first(desc="dataset map special characters removal"):
458
+ raw_datasets = raw_datasets.map(
459
+ remove_special_characters,
460
+ remove_columns=[text_column_name],
461
+ desc="remove special characters from datasets",
462
+ )
463
+
464
+ num_workers = data_args.preprocessing_num_workers
465
+
466
+ def is_transcript_in_length_range(text):
467
+ return 3 < len(text) < 200
468
+
469
+ raw_datasets = raw_datasets.filter(
470
+ is_transcript_in_length_range,
471
+ num_proc=num_workers,
472
+ input_columns=["target_text"],
473
+ )
474
+
475
+ # save special tokens for tokenizer
476
+ word_delimiter_token = data_args.word_delimiter_token
477
+ unk_token = data_args.unk_token
478
+ pad_token = data_args.pad_token
479
+
480
+ # 3. Next, let's load the config as we might need it to create
481
+ # the tokenizer
482
+ # load config
483
+ config = AutoConfig.from_pretrained(
484
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
485
+ )
486
+
487
+ # 4. Next, if no tokenizer file is defined,
488
+ # we create the vocabulary of the model by extracting all unique characters from
489
+ # the training and evaluation datasets
490
+ # We need to make sure that only first rank saves vocabulary
491
+ # make sure all processes wait until vocab is created
492
+ tokenizer_name_or_path = model_args.tokenizer_name_or_path
493
+ tokenizer_kwargs = {}
494
+ if tokenizer_name_or_path is None:
495
+ # save vocab in training output dir
496
+ tokenizer_name_or_path = training_args.output_dir
497
+
498
+ vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
499
+
500
+ with training_args.main_process_first():
501
+ if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
502
+ os.remove(vocab_file)
503
+
504
+ with training_args.main_process_first(desc="dataset map vocabulary creation"):
505
+ if not os.path.isfile(vocab_file):
506
+ os.makedirs(tokenizer_name_or_path, exist_ok=True)
507
+ vocab_dict = create_vocabulary_from_data(
508
+ raw_datasets,
509
+ word_delimiter_token=word_delimiter_token,
510
+ unk_token=unk_token,
511
+ pad_token=pad_token,
512
+ )
513
+
514
+ # save vocab dict to be loaded into tokenizer
515
+ with open(vocab_file, "w") as file:
516
+ json.dump(vocab_dict, file)
517
+
518
+ # if tokenizer has just been created
519
+ # it is defined by `tokenizer_class` if present in config else by `model_type`
520
+ tokenizer_kwargs = {
521
+ "config": config if config.tokenizer_class is not None else None,
522
+ "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
523
+ "unk_token": unk_token,
524
+ "pad_token": pad_token,
525
+ "word_delimiter_token": word_delimiter_token,
526
+ }
527
+
528
+ # 5. Now we can instantiate the feature extractor, tokenizer and model
529
+ # Note for distributed training, the .from_pretrained methods guarantee that only
530
+ # one local process can concurrently download model & vocab.
531
+
532
+ # load feature_extractor and tokenizer
533
+ tokenizer = AutoTokenizer.from_pretrained(
534
+ tokenizer_name_or_path,
535
+ use_auth_token=data_args.use_auth_token,
536
+ **tokenizer_kwargs,
537
+ )
538
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
539
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
540
+ )
541
+
542
+ # adapt config
543
+ config.update(
544
+ {
545
+ "feat_proj_dropout": model_args.feat_proj_dropout,
546
+ "attention_dropout": model_args.attention_dropout,
547
+ "hidden_dropout": model_args.hidden_dropout,
548
+ "final_dropout": model_args.final_dropout,
549
+ "mask_time_prob": model_args.mask_time_prob,
550
+ "mask_time_length": model_args.mask_time_length,
551
+ "mask_feature_prob": model_args.mask_feature_prob,
552
+ "mask_feature_length": model_args.mask_feature_length,
553
+ "gradient_checkpointing": training_args.gradient_checkpointing,
554
+ "layerdrop": model_args.layerdrop,
555
+ "ctc_loss_reduction": model_args.ctc_loss_reduction,
556
+ "pad_token_id": tokenizer.pad_token_id,
557
+ "vocab_size": len(tokenizer),
558
+ "activation_dropout": model_args.activation_dropout,
559
+ }
560
+ )
561
+
562
+ # create model
563
+ model = AutoModelForCTC.from_pretrained(
564
+ model_args.model_name_or_path,
565
+ cache_dir=model_args.cache_dir,
566
+ config=config,
567
+ use_auth_token=data_args.use_auth_token,
568
+ )
569
+
570
+ # freeze encoder
571
+ if model_args.freeze_feature_encoder:
572
+ model.freeze_feature_encoder()
573
+
574
+ # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
575
+ # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
576
+ # so that we just need to set the correct target sampling rate and normalize the input
577
+ # via the `feature_extractor`
578
+
579
+ # make sure that dataset decodes audio with correct sampling rate
580
+ dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
581
+ if dataset_sampling_rate != feature_extractor.sampling_rate:
582
+ raw_datasets = raw_datasets.cast_column(
583
+ data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
584
+ )
585
+
586
+ # derive max & min input length for sample rate & max duration
587
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
588
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
589
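+ # e.g. with a 16 kHz feature extractor, max_duration_in_seconds=20.0 gives 20.0 * 16000 = 320000 samples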
+ audio_column_name = data_args.audio_column_name
590
+
591
+ # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
592
+ phoneme_language = data_args.phoneme_language
593
+
594
+ # Preprocessing the datasets.
595
+ # We need to read the audio files as arrays and tokenize the targets.
596
+ def prepare_dataset(batch):
597
+ # load audio
598
+ sample = batch[audio_column_name]
599
+
600
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
601
+ batch["input_values"] = inputs.input_values[0]
602
+ batch["input_length"] = len(batch["input_values"])
603
+
604
+ # encode targets
605
+ additional_kwargs = {}
606
+ if phoneme_language is not None:
607
+ additional_kwargs["phonemizer_lang"] = phoneme_language
608
+
609
+ batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
610
+ return batch
611
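+ # "input_length" (raw sample count) is stored so the duration filter below can select by audio length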
+
612
+ with training_args.main_process_first(desc="dataset map preprocessing"):
613
+ vectorized_datasets = raw_datasets.map(
614
+ prepare_dataset,
615
+ remove_columns=next(iter(raw_datasets.values())).column_names,
616
+ num_proc=num_workers,
617
+ desc="preprocess datasets",
618
+ )
619
+
620
+ def is_audio_in_length_range(length):
621
+ return min_input_length < length < max_input_length
622
+
623
+ # filter out samples shorter than min_input_length or longer than max_input_length
624
+ vectorized_datasets = vectorized_datasets.filter(
625
+ is_audio_in_length_range,
626
+ num_proc=num_workers,
627
+ input_columns=["input_length"],
628
+ )
629
+
630
+ # 7. Next, we can prepare the training.
631
+ # Let's use word error rate (WER) as our evaluation metric,
632
+ # instantiate a data collator and the trainer
633
+
634
+ # Define evaluation metrics during training, *i.e.* word error rate, character error rate
635
+ eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
636
+
637
+ # for large datasets it is advised to run the preprocessing on a
638
+ # single machine first with ``args.preprocessing_only`` since there will most likely
639
+ # be a timeout when running the script in distributed mode.
640
+ # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
641
+ # cached dataset
642
+ if data_args.preprocessing_only:
643
+ logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
644
+ return
645
+
646
+ def compute_metrics(pred):
647
+ pred_logits = pred.predictions
648
+ pred_ids = np.argmax(pred_logits, axis=-1)
649
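+ # greedy (argmax) decoding of the CTC logits; batch_decode collapses repeated tokens and drops the pad/blank token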
+
650
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
651
+
652
+ pred_str = tokenizer.batch_decode(pred_ids)
653
+ # we do not want to group tokens when computing the metrics
654
+ label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
655
+
656
+ metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
657
+
658
+ return metrics
659
+
660
+ # Now save everything to be able to create a single processor later
661
+ if is_main_process(training_args.local_rank):
662
+ # save feature extractor, tokenizer and config
663
+ feature_extractor.save_pretrained(training_args.output_dir)
664
+ tokenizer.save_pretrained(training_args.output_dir)
665
+ config.save_pretrained(training_args.output_dir)
666
+
667
+ try:
668
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
669
+ except (OSError, KeyError):
670
+ warnings.warn(
671
+ "Loading a processor from a feature extractor config that does not"
672
+ " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
673
+ " attribute to your `preprocessor_config.json` file to suppress this warning: "
674
+ " `'processor_class': 'Wav2Vec2Processor'`",
675
+ FutureWarning,
676
+ )
677
+ processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
678
+
679
+ # Instantiate custom data collator
680
+ data_collator = DataCollatorCTCWithPadding(processor=processor)
681
+
682
+ # Initialize Trainer
683
+ trainer = Trainer(
684
+ model=model,
685
+ data_collator=data_collator,
686
+ args=training_args,
687
+ compute_metrics=compute_metrics,
688
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
689
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
690
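+ # the feature extractor is passed as `tokenizer` so the Trainer saves it together with each checkpoint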
+ tokenizer=feature_extractor,
691
+ )
692
+
693
+ # 8. Finally, we can start training
694
+
695
+ # Training
696
+ if training_args.do_train:
697
+
698
+ # use the last checkpoint if one exists
699
+ if last_checkpoint is not None:
700
+ checkpoint = last_checkpoint
701
+ elif os.path.isdir(model_args.model_name_or_path):
702
+ checkpoint = model_args.model_name_or_path
703
+ else:
704
+ checkpoint = None
705
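+ # precedence: resume from a checkpoint found in output_dir, else warm-start from a local model directory, else start from the pretrained weights already loaded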
+
706
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
707
+ trainer.save_model()
708
+
709
+ metrics = train_result.metrics
710
+ max_train_samples = (
711
+ data_args.max_train_samples
712
+ if data_args.max_train_samples is not None
713
+ else len(vectorized_datasets["train"])
714
+ )
715
+ metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
716
+
717
+ trainer.log_metrics("train", metrics)
718
+ trainer.save_metrics("train", metrics)
719
+ trainer.save_state()
720
+
721
+ # Evaluation
722
+ results = {}
723
+ if training_args.do_eval:
724
+ logger.info("*** Evaluate ***")
725
+ metrics = trainer.evaluate()
726
+ max_eval_samples = (
727
+ data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
728
+ )
729
+ metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
730
+
731
+ trainer.log_metrics("eval", metrics)
732
+ trainer.save_metrics("eval", metrics)
733
+
734
+ # Write model card and (optionally) push to hub
735
+ config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
736
+ kwargs = {
737
+ "finetuned_from": model_args.model_name_or_path,
738
+ "tasks": "speech-recognition",
739
+ "tags": ["automatic-speech-recognition", data_args.dataset_name],
740
+ "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
741
+ "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
742
+ }
743
+ if "common_voice" in data_args.dataset_name:
744
+ kwargs["language"] = config_name
745
+
746
+ if training_args.push_to_hub:
747
+ trainer.push_to_hub(**kwargs)
748
+ else:
749
+ trainer.create_model_card(**kwargs)
750
+
751
+ return results
752
+
753
+
754
+ if __name__ == "__main__":
755
+ main()
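+
+ # Example invocation (illustrative only; the script name, dataset name, config and
+ # output path below are hypothetical placeholders, not taken from this commit):
+ # python run_speech_recognition_ctc.py \
+ #     --model_name_or_path facebook/wav2vec2-xls-r-300m \
+ #     --dataset_name mozilla-foundation/common_voice_8_0 \
+ #     --dataset_config_name uz \
+ #     --output_dir ./output \
+ #     --do_train --do_eval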
runs/Jan30_19-35-25_job-0074bb36-c67f-4775-b1b6-176eb09b0ba4/1643572438.487491/events.out.tfevents.1643572438.job-0074bb36-c67f-4775-b1b6-176eb09b0ba4.2037878.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5565612f8d41fa8a11a0ff879bb9268a3cc394bec0033e9f2624a926f2164d6
3
+ size 4799
runs/Jan30_19-35-25_job-0074bb36-c67f-4775-b1b6-176eb09b0ba4/events.out.tfevents.1643572438.job-0074bb36-c67f-4775-b1b6-176eb09b0ba4.2037878.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a9742e7fb57cd97d03100bf8cafb642683943fd17ef3c11e82df96695142301
3
+ size 5509
runs/Jan31_00-08-55_job-0074bb36-c67f-4775-b1b6-176eb09b0ba4/1643588110.005454/events.out.tfevents.1643588110.job-0074bb36-c67f-4775-b1b6-176eb09b0ba4.2141134.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b1994a482846ce7060181c6a5916aa08eae4df7f5c9d105a3db0f5ff1407a63
3
+ size 4799
runs/Jan31_00-08-55_job-0074bb36-c67f-4775-b1b6-176eb09b0ba4/events.out.tfevents.1643588109.job-0074bb36-c67f-4775-b1b6-176eb09b0ba4.2141134.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:359c54472ff5f759dd075afb665d3dcb48ff459b6536ba234636aebedf37bad3
3
+ size 5827
special_tokens_map.json CHANGED
@@ -1 +1 @@
1
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7d3ac319b50903e646475f388bad406b8c6f443d9802aab8836dee28614a8358
3
  size 3055
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3f63ab537861ab3603897d5e7cf4fb3eced732620c40b32b82ca2af035896ab
3
  size 3055
vocab.json CHANGED
@@ -1 +1 @@
1
- {"\\": 1, "`": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "´": 29, "µ": 30, "ë": 31, "ò": 32, "ó": 33, "õ": 34, "ü": 35, "ģ": 36, "ı": 37, "ş": 38, "ƣ": 39, "ƶ": 40, "ʙ": 41, "ʻ": 42, "ʼ": 43, "а": 44, "б": 45, "в": 46, "г": 47, "д": 48, "е": 49, "ж": 50, "з": 51, "и": 52, "й": 53, "к": 54, "л": 55, "м": 56, "н": 57, "о": 58, "п": 59, "р": 60, "с": 61, "т": 62, "у": 63, "х": 64, "ч": 65, "ш": 66, "ъ": 67, "ы": 68, "ь": 69, "э": 70, "ю": 71, "ё": 72, "ј": 73, "ў": 74, "ғ": 75, "қ": 76, "ҳ": 77, "ӯ": 78, "ֹ": 79, "‘": 80, "•": 81, "−": 82, "✅": 83, "✔": 84, "️": 85, "|": 0, "[UNK]": 86, "[PAD]": 87}
 
1
+ {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "": 27, "|": 0, "[UNK]": 28, "[PAD]": 29}