chmanoj committed
Commit 39b76ad
Parent: e071349

End of training

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 77, "</s>": 78}
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+     "epoch": 2.0,
+     "eval_loss": 3.5509767532348633,
+     "eval_runtime": 81.5135,
+     "eval_samples": 1112,
+     "eval_samples_per_second": 13.642,
+     "eval_steps_per_second": 3.41,
+     "eval_wer": 1.0,
+     "train_loss": 9.413687669313871,
+     "train_runtime": 890.0468,
+     "train_samples": 3336,
+     "train_samples_per_second": 7.496,
+     "train_steps_per_second": 0.234
+ }
config.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "activation_dropout": 0.1,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 64,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.25,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.75,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 76,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 79,
+   "xvector_output_dim": 512
+ }
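The config above records the training overrides applied on top of facebook/wav2vec2-xls-r-300m, notably the aggressive SpecAugment settings (mask_time_prob=0.75, mask_feature_prob=0.25) and vocab_size=79 from the Telugu vocabulary (see run_bnb.sh below). A minimal sketch of inspecting these values, assuming this repo is checked out locally at ".":

import torch
from transformers import AutoConfig

# "." is an assumption: any local checkout of this repo (or its hub id) works.
config = AutoConfig.from_pretrained(".")

print(config.model_type)         # "wav2vec2"
print(config.vocab_size)         # 79
print(config.mask_time_prob)     # 0.75, SpecAugment time masking
print(config.mask_feature_prob)  # 0.25, SpecAugment feature masking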
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 2.0,
+     "eval_loss": 3.5509767532348633,
+     "eval_runtime": 81.5135,
+     "eval_samples": 1112,
+     "eval_samples_per_second": 13.642,
+     "eval_steps_per_second": 3.41,
+     "eval_wer": 1.0
+ }
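An eval_wer of 1.0 means that after two epochs the model recovers none of the reference words; the loss curve in trainer_state.json below shows training was still in its early high-loss phase. A toy sketch of the metric itself, using the same `wer` metric the script loads via `load_metric` (the example strings are made up):

from datasets import load_metric

wer_metric = load_metric("wer")

# Three reference words, none predicted correctly -> WER = 3/3 = 1.0,
# matching the eval_wer reported above.
predictions = ["అ అ అ"]
references = ["ఒక చిన్న వాక్యం"]
print(wer_metric.compute(predictions=predictions, references=references))  # 1.0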
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
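This is the Wav2Vec2FeatureExtractor configuration the script saves alongside the model: mono 16 kHz input with zero-mean/unit-variance normalization and right padding. A minimal usage sketch, assuming a local checkout at "." (the waveform below is a stand-in for a real dataset sample):

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained(".")  # local checkout assumed

# One second of dummy audio at the expected 16 kHz sampling rate; real input
# would come from the dataset's "audio" column.
speech = np.zeros(16000, dtype=np.float32)
inputs = extractor(speech, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 16000])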
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2aea33d1b01590d19758b30cc515e3616c38af1b82a7641d8bc77e97ddd8da76
+ size 1262247537
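The three lines above are only the git-lfs pointer; the ~1.2 GB of weights live in LFS storage. Once the object is pulled, inference is a few lines. A minimal sketch, assuming a local checkout at "." and a 16 kHz float waveform `speech` (e.g. from the OpenSLR SLR66 test split); Wav2Vec2Processor is used directly because this repo's preprocessor_config.json has no `processor_class` entry, mirroring the fallback in the training script:

import torch
from transformers import AutoModelForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained(".")  # local checkout assumed
model = AutoModelForCTC.from_pretrained(".")
model.eval()

def transcribe(speech, sampling_rate=16000):
    # Normalize the waveform, run the CTC head, and greedy-decode.
    inputs = processor(speech, sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    pred_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(pred_ids)[0]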
run_bnb.sh ADDED
@@ -0,0 +1,35 @@
+ python run_speech_recognition_ctc_bnb.py \
+     --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
+     --dataset_name="openslr_SLR66" \
+     --train_split_name="train" \
+     --preprocessing_num_workers="8" \
+     --output_dir="./" \
+     --overwrite_output_dir \
+     --num_train_epochs="2" \
+     --per_device_train_batch_size="4" \
+     --per_device_eval_batch_size="4" \
+     --gradient_accumulation_steps="8" \
+     --learning_rate="7.5e-5" \
+     --warmup_steps="2000" \
+     --length_column_name="input_length" \
+     --evaluation_strategy="steps" \
+     --text_column_name="sentence" \
+     --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
+     --save_steps="500" \
+     --eval_steps="500" \
+     --logging_steps="100" \
+     --layerdrop="0.0" \
+     --activation_dropout="0.1" \
+     --save_total_limit="3" \
+     --freeze_feature_encoder \
+     --feat_proj_dropout="0.0" \
+     --mask_time_prob="0.75" \
+     --mask_time_length="10" \
+     --mask_feature_prob="0.25" \
+     --mask_feature_length="64" \
+     --gradient_checkpointing \
+     --use_auth_token \
+     --fp16 \
+     --group_by_length \
+     --do_train --do_eval \
+     --push_to_hub
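The --chars_to_ignore characters are joined into a single regex character class inside the script and stripped from transcripts before tokenization. A small sketch of that cleaning step with a few of the characters above (the sample sentence is made up; re.escape is added here for safety and is not in the script itself):

import re

chars_to_ignore = [",", "?", ".", "!", "-", ";", ":"]  # subset of the flag above
chars_to_ignore_regex = f'[{"".join(re.escape(c) for c in chars_to_ignore)}]'

sentence = "ఇది ఒక ఉదాహరణ, వాక్యం!"  # made-up Telugu sample
print(re.sub(chars_to_ignore_regex, "", sentence).lower() + " ")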
run_speech_recognition_ctc_bnb.py ADDED
@@ -0,0 +1,779 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
+
+ import functools
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import bitsandbytes as bnb
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForCTC,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     Wav2Vec2Processor,
+     set_seed,
+ )
+ from transformers.trainer_pt_utils import get_parameter_names
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+ check_min_version("4.16.0.dev0")
+
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+ def get_telugu_dataset(validation_split=False):
+     dataset = load_dataset('openslr', 'SLR66')
+
+     seed = 1242
+
+     if validation_split:
+         train_testvalid = dataset['train'].train_test_split(test_size=0.2, seed=seed)
+         # split the 20% test+valid portion further into test and validation sets
+         test_valid = train_testvalid['test'].train_test_split(test_size=0.33, seed=seed)
+         # gather everything into a single DatasetDict
+         out_dataset = DatasetDict({
+             'train': train_testvalid['train'],
+             'test': test_valid['test'],
+             'valid': test_valid['train']})
+     else:
+         train_testvalid = dataset['train'].train_test_split(test_size=0.25, seed=seed)
+         out_dataset = DatasetDict({
+             'train': train_testvalid['train'],
+             'test': train_testvalid['test']})
+     return out_dataset
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: str = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_split_name: str = field(
+         default="train+validation",
+         metadata={
+             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
+         },
+     )
+     eval_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=None,
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     eval_metrics: List[str] = list_field(
+         default=["wer"],
+         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running "
+             ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be"
+             " passed to the tokenizer for tokenization. Note that"
+             " this is only relevant if the model classifies the"
+             " input audio to a sequence of phoneme sequences."
+         },
+     )
+
+
+ @dataclass
+ class DataCollatorCTCWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor (:class:`~transformers.AutoProcessor`)
+             The processor used for processing the data.
+         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
+               maximum acceptable input length for the model if that argument is not provided.
+             * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
+               different lengths).
+         max_length (:obj:`int`, `optional`):
+             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
+         max_length_labels (:obj:`int`, `optional`):
+             Maximum length of the ``labels`` returned list and optionally padding length (see above).
+         pad_to_multiple_of (:obj:`int`, `optional`):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+     """
+
+     processor: AutoProcessor
+     padding: Union[bool, str] = "longest"
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         with self.processor.as_target_processor():
+             labels_batch = self.processor.pad(
+                 label_features,
+                 padding=self.padding,
+                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                 return_tensors="pt",
+             )
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         batch["labels"] = labels
+
+         return batch
+
+
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels, create the vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take union of all unique characters in each dataset
+     vocab_set = functools.reduce(
+         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # 1. First, let's load the dataset
+     te_dataset = get_telugu_dataset(validation_split=False)
+     def load_te_dataset(split):
+         return te_dataset[split]
+
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_te_dataset(
+             split=data_args.train_split_name
+         )
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.text_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--text_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_te_dataset(
+             split=data_args.eval_split_name
+         )
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     chars_to_ignore_regex = (
+         f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+     )
+     text_column_name = data_args.text_column_name
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
+         else:
+             batch["target_text"] = batch[text_column_name].lower() + " "
+         return batch
+
+     with training_args.main_process_first(desc="dataset map special characters removal"):
+         raw_datasets = raw_datasets.map(
+             remove_special_characters,
+             remove_columns=[text_column_name],
+             desc="remove special characters from datasets",
+         )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     # load config
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # 4. Next, if no tokenizer file is defined,
+     # we create the vocabulary of the model by extracting all unique characters from
+     # the training and evaluation datasets
+     # We need to make sure that only first rank saves vocabulary
+     # make sure all processes wait until vocab is created
+     tokenizer_name_or_path = model_args.tokenizer_name_or_path
+     tokenizer_kwargs = {}
+     if tokenizer_name_or_path is None:
+         # save vocab in training output dir
+         tokenizer_name_or_path = training_args.output_dir
+
+         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+         with training_args.main_process_first():
+             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                 os.remove(vocab_file)
+
+         with training_args.main_process_first(desc="dataset map vocabulary creation"):
+             if not os.path.isfile(vocab_file):
+                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                 vocab_dict = create_vocabulary_from_data(
+                     raw_datasets,
+                     word_delimiter_token=word_delimiter_token,
+                     unk_token=unk_token,
+                     pad_token=pad_token,
+                 )
+
+                 # save vocab dict to be loaded into tokenizer
+                 with open(vocab_file, "w") as file:
+                     json.dump(vocab_dict, file)
+
+         # if tokenizer has just been created
+         # it is defined by `tokenizer_class` if present in config else by `model_type`
+         tokenizer_kwargs = {
+             "config": config if config.tokenizer_class is not None else None,
+             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+             "unk_token": unk_token,
+             "pad_token": pad_token,
+             "word_delimiter_token": word_delimiter_token,
+         }
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(
+         tokenizer_name_or_path,
+         use_auth_token=data_args.use_auth_token,
+         **tokenizer_kwargs,
+     )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     config.update(
+         {
+             "feat_proj_dropout": model_args.feat_proj_dropout,
+             "attention_dropout": model_args.attention_dropout,
+             "hidden_dropout": model_args.hidden_dropout,
+             "final_dropout": model_args.final_dropout,
+             "mask_time_prob": model_args.mask_time_prob,
+             "mask_time_length": model_args.mask_time_length,
+             "mask_feature_prob": model_args.mask_feature_prob,
+             "mask_feature_length": model_args.mask_feature_length,
+             "gradient_checkpointing": training_args.gradient_checkpointing,
+             "layerdrop": model_args.layerdrop,
+             "ctc_loss_reduction": model_args.ctc_loss_reduction,
+             "pad_token_id": tokenizer.pad_token_id,
+             "vocab_size": len(tokenizer),
+             "activation_dropout": model_args.activation_dropout,
+         }
+     )
+
+     # create model
+     model = AutoModelForCTC.from_pretrained(
+         model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         config=config,
+         use_auth_token=data_args.use_auth_token,
+     )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["input_length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     def is_audio_in_length_range(length):
+         return length > min_input_length and length < max_input_length
+
+     # filter data that is shorter than min_input_length
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["input_length"],
+     )
+
+     # 7. Next, we can prepare the training.
+     # Let's use word error rate (WER) as our evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_metrics(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
+
+         return metrics
+
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     try:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     except (OSError, KeyError):
+         warnings.warn(
+             "Loading a processor from a feature extractor config that does not"
+             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
+             " attribute to your `preprocessor_config.json` file to suppress this warning: "
+             " `'processor_class': 'Wav2Vec2Processor'`",
+             FutureWarning,
+         )
+         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = DataCollatorCTCWithPadding(processor=processor)
+
+     decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
+     decay_parameters = [name for name in decay_parameters if "bias" not in name]
+     optimizer_grouped_parameters = [
+         {
+             "params": [p for n, p in model.named_parameters() if n in decay_parameters],
+             "weight_decay": training_args.weight_decay,
+         },
+         {
+             "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
+             "weight_decay": 0.0,
+         },
+     ]
+     optimizer = bnb.optim.Adam8bit(
+         params=optimizer_grouped_parameters,
+         betas=(training_args.adam_beta1, training_args.adam_beta2),
+         eps=training_args.adam_epsilon,
+     )
+
+     optimizers = (optimizer, None)
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         data_collator=data_collator,
+         args=training_args,
+         compute_metrics=compute_metrics,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+         optimizers=optimizers,
+     )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use last checkpoint if it exists
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate()
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # Write model card and (optionally) push to hub
+     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "speech-recognition",
+         "tags": ["automatic-speech-recognition", data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+     }
+     if "common_voice" in data_args.dataset_name:
+         kwargs["language"] = config_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.0,
+     "train_loss": 9.413687669313871,
+     "train_runtime": 890.0468,
+     "train_samples": 3336,
+     "train_samples_per_second": 7.496,
+     "train_steps_per_second": 0.234
+ }
trainer_state.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.9976019184652278,
+   "global_step": 208,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.96,
+       "learning_rate": 4.9500000000000004e-05,
+       "loss": 14.7256,
+       "step": 100
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 9.95e-05,
+       "loss": 4.5709,
+       "step": 200
+     },
+     {
+       "epoch": 2.0,
+       "step": 208,
+       "total_flos": 9.397835469173249e+17,
+       "train_loss": 9.413687669313871,
+       "train_runtime": 890.0468,
+       "train_samples_per_second": 7.496,
+       "train_steps_per_second": 0.234
+     }
+   ],
+   "max_steps": 208,
+   "num_train_epochs": 2,
+   "total_flos": 9.397835469173249e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
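The 208 optimizer steps are consistent with the run_bnb.sh settings; note also that the learning rate in log_history is still rising at step 200, so the 2000-step warmup never finished in this short run. A quick check of the step count, assuming a single device (the device count is not recorded in these files):

train_samples = 3336  # from train_results.json
per_device_batch = 4  # --per_device_train_batch_size
grad_accum = 8        # --gradient_accumulation_steps
epochs = 2            # --num_train_epochs

batches_per_epoch = -(-train_samples // per_device_batch)  # ceil(3336 / 4) = 834
update_steps_per_epoch = batches_per_epoch // grad_accum   # 834 // 8 = 104
print(update_steps_per_epoch * epochs)                     # 208 == global_step above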
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b39e8b28f63935d809d3521ec723e978370eeaf831e42a8b1bbd867dd79e10c
+ size 2991
vocab.json ADDED
@@ -0,0 +1 @@
+ {"\\": 1, "_": 2, "e": 3, "g": 4, "l": 5, "n": 6, "p": 7, "r": 8, "s": 9, "t": 10, "ఁ": 11, "ం": 12, "ః": 13, "అ": 14, "ఆ": 15, "ఇ": 16, "ఈ": 17, "ఉ": 18, "ఊ": 19, "ఋ": 20, "ఎ": 21, "ఏ": 22, "ఐ": 23, "ఒ": 24, "ఓ": 25, "ఔ": 26, "క": 27, "ఖ": 28, "గ": 29, "ఘ": 30, "ఙ": 31, "చ": 32, "ఛ": 33, "జ": 34, "ఞ": 35, "ట": 36, "ఠ": 37, "డ": 38, "ఢ": 39, "ణ": 40, "త": 41, "థ": 42, "ద": 43, "ధ": 44, "న": 45, "ప": 46, "ఫ": 47, "బ": 48, "భ": 49, "మ": 50, "య": 51, "ర": 52, "ఱ": 53, "ల": 54, "ళ": 55, "వ": 56, "శ": 57, "ష": 58, "స": 59, "హ": 60, "ా": 61, "ి": 62, "ీ": 63, "ు": 64, "ూ": 65, "ృ": 66, "ె": 67, "ే": 68, "ై": 69, "ొ": 70, "ో": 71, "ౌ": 72, "్": 73, "‌": 74, "|": 0, "[UNK]": 75, "[PAD]": 76}