SaniyatMushrat committed on
Commit
bd6d7f8
1 Parent(s): 7f0e2a0
README.md CHANGED
@@ -1,3 +1,89 @@
  ---
+ language:
+ - bn
  license: apache-2.0
+ tags:
+ - automatic-speech-recognition
+ - hf-asr-leaderboard
+ - openslr_SLR53
+ - robust-speech-event
+ datasets:
+ - openslr
+ - SLR53
+ - Harveenchadha/indic-text
+ metrics:
+ - wer
+ - cer
+ model-index:
+ - name: Tahsin-Mayeesha/wav2vec2-bn-300m
+   results:
+   - task:
+       type: automatic-speech-recognition
+       name: Speech Recognition
+     dataset:
+       type: openslr
+       name: Open SLR
+       args: SLR53
+     metrics:
+     - type: wer
+       value: 0.31104373941386626
+       name: Test WER
+     - type: cer
+       value: 0.07263099973420006
+       name: Test CER
+     - type: wer
+       value: 0.17776164652632478
+       name: Test WER with lm
+     - type: cer
+       value: 0.04394092712884769
+       name: Test CER with lm
  ---
+
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the OpenSLR SLR53 Bengali dataset.
+ It achieves the following results on the evaluation set.
+
+ Without a language model:
+ - WER: 0.3110
+ - CER: 0.0726
+
+ With a 5-gram language model trained on the [indic-text](https://huggingface.co/datasets/Harveenchadha/indic-text/tree/main) dataset:
+ - WER: 0.17776
+ - CER: 0.04394
+
+ Note: 10% of the total 218,703 samples were used for evaluation, so the evaluation set contains 21,871 examples. Training was stopped after 30k steps. Output predictions are available under the files section.
+
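+ As a minimal usage sketch, the model can be loaded through the `automatic-speech-recognition` pipeline; the audio path below is a placeholder, and `pyctcdecode` plus `kenlm` need to be installed for the LM-boosted decoder shipped in this repository to be picked up:
+
+ ```python
+ from transformers import pipeline
+
+ # Loads the CTC model together with the Wav2Vec2ProcessorWithLM stored in this repo.
+ asr = pipeline("automatic-speech-recognition", model="Tahsin-Mayeesha/wav2vec2-bn-300m")
+
+ # "sample_bn.wav" is a placeholder for any Bengali speech recording.
+ print(asr("sample_bn.wav")["text"])
+ ```
+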
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 7.5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - gradient_accumulation_steps: 4
+ - optimizer: Adam with betas=(0.9, 0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 2000
+ - mixed_precision_training: Native AMP
+
+ ### Framework versions
+
+ - Transformers 4.16.0.dev0
+ - Pytorch 1.10.1+cu102
+ - Datasets 1.17.1.dev0
+ - Tokenizers 0.11.0
+
+ Note: The training and evaluation scripts were adapted from https://huggingface.co/chmanoj/xls-r-300m-te and https://github.com/huggingface/transformers/tree/master/examples/research_projects/robust-speech-event.
+ Bengali speech data was not available in the Common Voice or multilingual LibriSpeech datasets, so OpenSLR SLR53 was used instead.
+
+ Note 2: A minimum audio duration of 0.1 s was used to filter the training data, which excluded roughly 10-20 samples.
+
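+ The evaluation split mentioned above can be reproduced from the raw corpus as sketched below; this mirrors `get_bengali_dataset` in `eval.py` further down, with the seed and split size taken from that script:
+
+ ```python
+ from datasets import load_dataset, DatasetDict
+
+ # Load OpenSLR SLR53 (Bengali) and carve out the 10% evaluation split used here.
+ dataset = load_dataset("openslr", "SLR53")
+ split = dataset["train"].train_test_split(test_size=0.1, seed=1242)
+ data = DatasetDict({"train": split["train"], "test": split["test"]})
+ print(data)
+ ```
+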
+ # Citation
+
+ @misc{tahsin_mayeesha_2023,
+     author = { {Tahsin Mayeesha} },
+     title = { wav2vec2-bn-300m (Revision e10defc) },
+     year = 2023,
+     url = { https://huggingface.co/Tahsin-Mayeesha/wav2vec2-bn-300m },
+     doi = { 10.57967/hf/0939 },
+     publisher = { Hugging Face }
+ }
add lm decoder.ipynb ADDED
@@ -0,0 +1,399 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "db2971a9",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "data": {
11
+ "application/vnd.jupyter.widget-view+json": {
12
+ "model_id": "29f15da8fd9549188347df46955b078d",
13
+ "version_major": 2,
14
+ "version_minor": 0
15
+ },
16
+ "text/plain": [
17
+ "VBox(children=(HTML(value='<center>\\n<img src=https://huggingface.co/front/assets/huggingface_logo-noborder.sv…"
18
+ ]
19
+ },
20
+ "metadata": {},
21
+ "output_type": "display_data"
22
+ }
23
+ ],
24
+ "source": [
25
+ "from huggingface_hub import notebook_login\n",
26
+ "\n",
27
+ "notebook_login()"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": 2,
33
+ "id": "2377a1e5",
34
+ "metadata": {},
35
+ "outputs": [
36
+ {
37
+ "name": "stderr",
38
+ "output_type": "stream",
39
+ "text": [
40
+ "Cloning https://huggingface.co/Tahsin-Mayeesha/wav2vec2-bn-300m into local empty directory.\n"
41
+ ]
42
+ },
43
+ {
44
+ "data": {
45
+ "application/vnd.jupyter.widget-view+json": {
46
+ "model_id": "9eacbc3325314c0b9e70d738ea655554",
47
+ "version_major": 2,
48
+ "version_minor": 0
49
+ },
50
+ "text/plain": [
51
+ "Download file pytorch_model.bin: 0%| | 594/1.18G [00:00<?, ?B/s]"
52
+ ]
53
+ },
54
+ "metadata": {},
55
+ "output_type": "display_data"
56
+ },
57
+ {
58
+ "data": {
59
+ "application/vnd.jupyter.widget-view+json": {
60
+ "model_id": "abce727d7bdc4291b5b8158ba46ca359",
61
+ "version_major": 2,
62
+ "version_minor": 0
63
+ },
64
+ "text/plain": [
65
+ "Download file runs/Feb02_18-57-15_job-adbfa1a2-412e-4cc9-8438-18b8de11318f/events.out.tfevents.1643828376.job-…"
66
+ ]
67
+ },
68
+ "metadata": {},
69
+ "output_type": "display_data"
70
+ },
71
+ {
72
+ "data": {
73
+ "application/vnd.jupyter.widget-view+json": {
74
+ "model_id": "990958b3a4894a58bbc7b275683aadc5",
75
+ "version_major": 2,
76
+ "version_minor": 0
77
+ },
78
+ "text/plain": [
79
+ "Download file runs/Feb02_18-57-15_job-adbfa1a2-412e-4cc9-8438-18b8de11318f/1643828376.0908198/events.out.tfeve…"
80
+ ]
81
+ },
82
+ "metadata": {},
83
+ "output_type": "display_data"
84
+ },
85
+ {
86
+ "data": {
87
+ "application/vnd.jupyter.widget-view+json": {
88
+ "model_id": "0e7b5cf37e13411f861ab6f97a7086f5",
89
+ "version_major": 2,
90
+ "version_minor": 0
91
+ },
92
+ "text/plain": [
93
+ "Download file training_args.bin: 100%|##########| 2.92k/2.92k [00:00<?, ?B/s]"
94
+ ]
95
+ },
96
+ "metadata": {},
97
+ "output_type": "display_data"
98
+ },
99
+ {
100
+ "data": {
101
+ "application/vnd.jupyter.widget-view+json": {
102
+ "model_id": "083dfac65da1440bbb1b44d0ef9736b2",
103
+ "version_major": 2,
104
+ "version_minor": 0
105
+ },
106
+ "text/plain": [
107
+ "Clean file runs/Feb02_18-57-15_job-adbfa1a2-412e-4cc9-8438-18b8de11318f/1643828376.0908198/events.out.tfevents…"
108
+ ]
109
+ },
110
+ "metadata": {},
111
+ "output_type": "display_data"
112
+ },
113
+ {
114
+ "data": {
115
+ "application/vnd.jupyter.widget-view+json": {
116
+ "model_id": "de458e6cd4844842a9518ab9a6555742",
117
+ "version_major": 2,
118
+ "version_minor": 0
119
+ },
120
+ "text/plain": [
121
+ "Clean file training_args.bin: 34%|###4 | 1.00k/2.92k [00:00<?, ?B/s]"
122
+ ]
123
+ },
124
+ "metadata": {},
125
+ "output_type": "display_data"
126
+ },
127
+ {
128
+ "data": {
129
+ "application/vnd.jupyter.widget-view+json": {
130
+ "model_id": "83b6f9d8c39649e382025e1462d12ee0",
131
+ "version_major": 2,
132
+ "version_minor": 0
133
+ },
134
+ "text/plain": [
135
+ "Clean file runs/Feb02_18-57-15_job-adbfa1a2-412e-4cc9-8438-18b8de11318f/events.out.tfevents.1643828376.job-adb…"
136
+ ]
137
+ },
138
+ "metadata": {},
139
+ "output_type": "display_data"
140
+ },
141
+ {
142
+ "data": {
143
+ "application/vnd.jupyter.widget-view+json": {
144
+ "model_id": "95142f5b1abc4218b5ed797b9edffbbc",
145
+ "version_major": 2,
146
+ "version_minor": 0
147
+ },
148
+ "text/plain": [
149
+ "Clean file pytorch_model.bin: 0%| | 1.00k/1.18G [00:00<?, ?B/s]"
150
+ ]
151
+ },
152
+ "metadata": {},
153
+ "output_type": "display_data"
154
+ }
155
+ ],
156
+ "source": [
157
+ "from huggingface_hub import Repository\n",
158
+ "\n",
159
+ "repo = Repository(local_dir=\"wav2vec2-bn-300m\", clone_from=\"Tahsin-Mayeesha/wav2vec2-bn-300m\")"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "code",
164
+ "execution_count": 4,
165
+ "id": "091991cd",
166
+ "metadata": {},
167
+ "outputs": [
168
+ {
169
+ "data": {
170
+ "application/vnd.jupyter.widget-view+json": {
171
+ "model_id": "ab4eb980966140b7a6bca34bc51c3fd2",
172
+ "version_major": 2,
173
+ "version_minor": 0
174
+ },
175
+ "text/plain": [
176
+ "Downloading: 0%| | 0.00/212 [00:00<?, ?B/s]"
177
+ ]
178
+ },
179
+ "metadata": {},
180
+ "output_type": "display_data"
181
+ },
182
+ {
183
+ "data": {
184
+ "application/vnd.jupyter.widget-view+json": {
185
+ "model_id": "442c24c800b949238d33dd9ad2100c09",
186
+ "version_major": 2,
187
+ "version_minor": 0
188
+ },
189
+ "text/plain": [
190
+ "Downloading: 0%| | 0.00/260 [00:00<?, ?B/s]"
191
+ ]
192
+ },
193
+ "metadata": {},
194
+ "output_type": "display_data"
195
+ },
196
+ {
197
+ "data": {
198
+ "application/vnd.jupyter.widget-view+json": {
199
+ "model_id": "69504d7b2d994c6789cdc9b27c00bc5b",
200
+ "version_major": 2,
201
+ "version_minor": 0
202
+ },
203
+ "text/plain": [
204
+ "Downloading: 0%| | 0.00/1.99k [00:00<?, ?B/s]"
205
+ ]
206
+ },
207
+ "metadata": {},
208
+ "output_type": "display_data"
209
+ },
210
+ {
211
+ "data": {
212
+ "application/vnd.jupyter.widget-view+json": {
213
+ "model_id": "f8290e53eb4f402e99b6d71bd5836955",
214
+ "version_major": 2,
215
+ "version_minor": 0
216
+ },
217
+ "text/plain": [
218
+ "Downloading: 0%| | 0.00/1.13k [00:00<?, ?B/s]"
219
+ ]
220
+ },
221
+ "metadata": {},
222
+ "output_type": "display_data"
223
+ },
224
+ {
225
+ "data": {
226
+ "application/vnd.jupyter.widget-view+json": {
227
+ "model_id": "abaa5f4a82de4ec8b27a67178addfcb9",
228
+ "version_major": 2,
229
+ "version_minor": 0
230
+ },
231
+ "text/plain": [
232
+ "Downloading: 0%| | 0.00/25.0 [00:00<?, ?B/s]"
233
+ ]
234
+ },
235
+ "metadata": {},
236
+ "output_type": "display_data"
237
+ },
238
+ {
239
+ "data": {
240
+ "application/vnd.jupyter.widget-view+json": {
241
+ "model_id": "9b6186e1a4df4999b71e8ea2e8f9d392",
242
+ "version_major": 2,
243
+ "version_minor": 0
244
+ },
245
+ "text/plain": [
246
+ "Downloading: 0%| | 0.00/309 [00:00<?, ?B/s]"
247
+ ]
248
+ },
249
+ "metadata": {},
250
+ "output_type": "display_data"
251
+ }
252
+ ],
253
+ "source": [
254
+ "from transformers import AutoProcessor\n",
255
+ "processor = AutoProcessor.from_pretrained(\"Tahsin-Mayeesha/wav2vec2-bn-300m\")"
256
+ ]
257
+ },
258
+ {
259
+ "cell_type": "code",
260
+ "execution_count": 5,
261
+ "id": "3507c167",
262
+ "metadata": {},
263
+ "outputs": [],
264
+ "source": [
265
+ "vocab_dict = processor.tokenizer.get_vocab()\n",
266
+ "sorted_vocab_dict = {k.lower(): v for k, v in sorted(vocab_dict.items(), key=lambda item: item[1])}"
267
+ ]
268
+ },
269
+ {
270
+ "cell_type": "code",
271
+ "execution_count": 6,
272
+ "id": "15ee83a8",
273
+ "metadata": {},
274
+ "outputs": [
275
+ {
276
+ "name": "stderr",
277
+ "output_type": "stream",
278
+ "text": [
279
+ "Found entries of length > 1 in alphabet. This is unusual unless style is BPE, but the alphabet was not recognized as BPE type. Is this correct?\n"
280
+ ]
281
+ }
282
+ ],
283
+ "source": [
284
+ "from pyctcdecode import build_ctcdecoder\n",
285
+ "\n",
286
+ "decoder = build_ctcdecoder(\n",
287
+ " labels=list(sorted_vocab_dict.keys()),\n",
288
+ " kenlm_model_path=\"5gram.arpa\",\n",
289
+ ")"
290
+ ]
291
+ },
292
+ {
293
+ "cell_type": "code",
294
+ "execution_count": 7,
295
+ "id": "46585ac6",
296
+ "metadata": {},
297
+ "outputs": [],
298
+ "source": [
299
+ "from transformers import Wav2Vec2ProcessorWithLM\n",
300
+ "\n",
301
+ "processor_with_lm = Wav2Vec2ProcessorWithLM(\n",
302
+ " feature_extractor=processor.feature_extractor,\n",
303
+ " tokenizer=processor.tokenizer,\n",
304
+ " decoder=decoder\n",
305
+ ")"
306
+ ]
307
+ },
308
+ {
309
+ "cell_type": "code",
310
+ "execution_count": 8,
311
+ "id": "c17befdc",
312
+ "metadata": {},
313
+ "outputs": [],
314
+ "source": [
315
+ "processor_with_lm.save_pretrained(\"wav2vec2-bn-300m\")"
316
+ ]
317
+ },
318
+ {
319
+ "cell_type": "code",
320
+ "execution_count": 9,
321
+ "id": "f3ec60c4",
322
+ "metadata": {},
323
+ "outputs": [
324
+ {
325
+ "name": "stderr",
326
+ "output_type": "stream",
327
+ "text": [
328
+ "Adding files tracked by Git LFS: ['language_model/unigrams.txt']. This may take a bit of time if the files are large.\n"
329
+ ]
330
+ },
331
+ {
332
+ "data": {
333
+ "application/vnd.jupyter.widget-view+json": {
334
+ "model_id": "7aa6e28e8a9c49b79b09f5d2884383d7",
335
+ "version_major": 2,
336
+ "version_minor": 0
337
+ },
338
+ "text/plain": [
339
+ "Upload file language_model/unigrams.txt: 0%| | 3.38k/22.3M [00:00<?, ?B/s]"
340
+ ]
341
+ },
342
+ "metadata": {},
343
+ "output_type": "display_data"
344
+ },
345
+ {
346
+ "name": "stderr",
347
+ "output_type": "stream",
348
+ "text": [
349
+ "To https://huggingface.co/Tahsin-Mayeesha/wav2vec2-bn-300m\n",
350
+ " b6e6996..258816a main -> main\n",
351
+ "\n"
352
+ ]
353
+ },
354
+ {
355
+ "data": {
356
+ "text/plain": [
357
+ "'https://huggingface.co/Tahsin-Mayeesha/wav2vec2-bn-300m/commit/258816acfe8e1e49f41b4edcf9f20f812b4bf00d'"
358
+ ]
359
+ },
360
+ "execution_count": 9,
361
+ "metadata": {},
362
+ "output_type": "execute_result"
363
+ }
364
+ ],
365
+ "source": [
366
+ "repo.push_to_hub(commit_message=\"Upload lm-boosted decoder\")"
367
+ ]
368
+ },
369
+ {
370
+ "cell_type": "code",
371
+ "execution_count": null,
372
+ "id": "add2d4ca",
373
+ "metadata": {},
374
+ "outputs": [],
375
+ "source": []
376
+ }
377
+ ],
378
+ "metadata": {
379
+ "kernelspec": {
380
+ "display_name": "Python 3",
381
+ "language": "python",
382
+ "name": "python3"
383
+ },
384
+ "language_info": {
385
+ "codemirror_mode": {
386
+ "name": "ipython",
387
+ "version": 3
388
+ },
389
+ "file_extension": ".py",
390
+ "mimetype": "text/x-python",
391
+ "name": "python",
392
+ "nbconvert_exporter": "python",
393
+ "pygments_lexer": "ipython3",
394
+ "version": "3.8.8"
395
+ }
396
+ },
397
+ "nbformat": 4,
398
+ "nbformat_minor": 5
399
+ }
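The notebook above wires a pyctcdecode/KenLM decoder into the processor and pushes it to the Hub. Below is a minimal sketch of using the resulting LM-boosted processor for offline decoding; the audio path is a placeholder and `librosa` is assumed only for loading (any 16 kHz loader works):

```python
import torch
import librosa
from transformers import AutoModelForCTC, AutoProcessor

model = AutoModelForCTC.from_pretrained("Tahsin-Mayeesha/wav2vec2-bn-300m")
processor = AutoProcessor.from_pretrained("Tahsin-Mayeesha/wav2vec2-bn-300m")  # Wav2Vec2ProcessorWithLM

# "sample_bn.wav" is a placeholder path to a Bengali speech recording.
speech, _ = librosa.load("sample_bn.wav", sr=16_000)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode on a processor with LM runs the pyctcdecode beam search
# with the 5-gram KenLM model instead of plain argmax decoding.
print(processor.batch_decode(logits.numpy()).text[0])
```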
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 110, "</s>": 111}
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": [" ", "_", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "x", "y", "z", "\u0093", "\u0094", "\u0153", "\u0964", "\u0981", "\u0982", "\u0983", "\u0985", "\u0986", "\u0987", "\u0988", "\u0989", "\u098a", "\u098b", "\u098f", "\u0990", "\u0993", "\u0994", "\u0995", "\u0996", "\u0997", "\u0998", "\u0999", "\u099a", "\u099b", "\u099c", "\u099d", "\u099e", "\u099f", "\u09a0", "\u09a1", "\u09a2", "\u09a3", "\u09a4", "\u09a5", "\u09a6", "\u09a7", "\u09a8", "\u09aa", "\u09ab", "\u09ac", "\u09ad", "\u09ae", "\u09af", "\u09b0", "\u09b2", "\u09b6", "\u09b7", "\u09b8", "\u09b9", "\u09bc", "\u09be", "\u09bf", "\u09c0", "\u09c1", "\u09c2", "\u09c3", "\u09c7", "\u09c8", "\u09cb", "\u09cc", "\u09cd", "\u09ce", "\u09d7", "\u09dc", "\u09dd", "\u09df", "\u09e6", "\u09e7", "\u09e8", "\u09e9", "\u09ea", "\u09eb", "\u09ec", "\u09ed", "\u09ee", "\u09ef", "\u09f0", "\u200c", "\u200d", "\u200e", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
config.json ADDED
@@ -0,0 +1,107 @@
1
+ {
2
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
3
+ "activation_dropout": 0.1,
4
+ "adapter_kernel_size": 3,
5
+ "adapter_stride": 2,
6
+ "add_adapter": false,
7
+ "apply_spec_augment": true,
8
+ "architectures": [
9
+ "Wav2Vec2ForCTC"
10
+ ],
11
+ "attention_dropout": 0.0,
12
+ "bos_token_id": 1,
13
+ "classifier_proj_size": 256,
14
+ "codevector_dim": 768,
15
+ "contrastive_logits_temperature": 0.1,
16
+ "conv_bias": true,
17
+ "conv_dim": [
18
+ 512,
19
+ 512,
20
+ 512,
21
+ 512,
22
+ 512,
23
+ 512,
24
+ 512
25
+ ],
26
+ "conv_kernel": [
27
+ 10,
28
+ 3,
29
+ 3,
30
+ 3,
31
+ 3,
32
+ 2,
33
+ 2
34
+ ],
35
+ "conv_stride": [
36
+ 5,
37
+ 2,
38
+ 2,
39
+ 2,
40
+ 2,
41
+ 2,
42
+ 2
43
+ ],
44
+ "ctc_loss_reduction": "mean",
45
+ "ctc_zero_infinity": false,
46
+ "diversity_loss_weight": 0.1,
47
+ "do_stable_layer_norm": true,
48
+ "eos_token_id": 2,
49
+ "feat_extract_activation": "gelu",
50
+ "feat_extract_dropout": 0.0,
51
+ "feat_extract_norm": "layer",
52
+ "feat_proj_dropout": 0.0,
53
+ "feat_quantizer_dropout": 0.0,
54
+ "final_dropout": 0.0,
55
+ "hidden_act": "gelu",
56
+ "hidden_dropout": 0.0,
57
+ "hidden_size": 1024,
58
+ "initializer_range": 0.02,
59
+ "intermediate_size": 4096,
60
+ "layer_norm_eps": 1e-05,
61
+ "layerdrop": 0.0,
62
+ "mask_feature_length": 64,
63
+ "mask_feature_min_masks": 0,
64
+ "mask_feature_prob": 0.25,
65
+ "mask_time_length": 10,
66
+ "mask_time_min_masks": 2,
67
+ "mask_time_prob": 0.75,
68
+ "model_type": "wav2vec2",
69
+ "num_adapter_layers": 3,
70
+ "num_attention_heads": 16,
71
+ "num_codevector_groups": 2,
72
+ "num_codevectors_per_group": 320,
73
+ "num_conv_pos_embedding_groups": 16,
74
+ "num_conv_pos_embeddings": 128,
75
+ "num_feat_extract_layers": 7,
76
+ "num_hidden_layers": 24,
77
+ "num_negatives": 100,
78
+ "output_hidden_size": 1024,
79
+ "pad_token_id": 109,
80
+ "proj_codevector_dim": 768,
81
+ "tdnn_dilation": [
82
+ 1,
83
+ 2,
84
+ 3,
85
+ 1,
86
+ 1
87
+ ],
88
+ "tdnn_dim": [
89
+ 512,
90
+ 512,
91
+ 512,
92
+ 512,
93
+ 1500
94
+ ],
95
+ "tdnn_kernel": [
96
+ 5,
97
+ 3,
98
+ 3,
99
+ 1,
100
+ 1
101
+ ],
102
+ "torch_dtype": "float32",
103
+ "transformers_version": "4.17.0.dev0",
104
+ "use_weighted_layer_sum": false,
105
+ "vocab_size": 112,
106
+ "xvector_output_dim": 512
107
+ }
eval.py ADDED
@@ -0,0 +1,155 @@
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import re
4
+ from typing import Dict
5
+
6
+ from datasets import Audio, Dataset, load_dataset, load_metric, DatasetDict
7
+
8
+ from transformers import AutoFeatureExtractor, pipeline
9
+
10
+
11
+ def log_results(result: Dataset, args: Dict[str, str]):
12
+ """DO NOT CHANGE. This function computes and logs the result metrics."""
13
+
14
+ log_outputs = args.log_outputs
15
+ dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
16
+
17
+ # load metric
18
+ wer = load_metric("wer")
19
+ cer = load_metric("cer")
20
+
21
+ # compute metrics
22
+ wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
23
+ cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
24
+
25
+ # print & log results
26
+ result_str = f"WER: {wer_result}\n" f"CER: {cer_result}"
27
+ print(result_str)
28
+
29
+ with open(f"{dataset_id}_eval_results.txt", "w") as f:
30
+ f.write(result_str)
31
+
32
+ # log all results in text file. Possibly interesting for analysis
33
+ if log_outputs is not None:
34
+ pred_file = f"log_{dataset_id}_predictions.txt"
35
+ target_file = f"log_{dataset_id}_targets.txt"
36
+
37
+ with open(pred_file, "w") as p, open(target_file, "w") as t:
38
+
39
+ # mapping function to write output
40
+ def write_to_file(batch, i):
41
+ p.write(f"{i}" + "\n")
42
+ p.write(batch["prediction"] + "\n")
43
+ t.write(f"{i}" + "\n")
44
+ t.write(batch["target"] + "\n")
45
+
46
+ result.map(write_to_file, with_indices=True)
47
+
48
+
49
+ def normalize_text(text: str) -> str:
50
+ """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text."""
51
+
52
+ chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
53
+
54
+ text = re.sub(chars_to_ignore_regex, "", text.lower())
55
+
56
+ # In addition, we can normalize the target text, e.g. removing new lines characters etc...
57
+ # note that order is important here!
58
+ token_sequences_to_ignore = ["\n\n", "\n", " ", " "]
59
+
60
+ for t in token_sequences_to_ignore:
61
+ text = " ".join(text.split(t))
62
+
63
+ return text
64
+
65
+
66
+ def get_bengali_dataset(validation_split=False):
67
+ dataset = load_dataset('openslr', 'SLR53')
68
+
69
+ seed=1242
70
+
71
+ if validation_split:
72
+ train_testvalid = dataset['train'].train_test_split(test_size=0.2, seed=seed)
73
+ # Split the 20% held-out portion into test (~7%) and validation (~13%)
74
+ test_valid = train_testvalid['test'].train_test_split(test_size=0.33, seed=seed)
75
+ # gather everyone if you want to have a single DatasetDict
76
+ out_dataset = DatasetDict({
77
+ 'train': train_testvalid['train'],
78
+ 'test': test_valid['test'],
79
+ 'valid': test_valid['train']})
80
+ else:
81
+ train_testvalid = dataset['train'].train_test_split(test_size=0.1, seed=seed)
82
+ out_dataset = DatasetDict({
83
+ 'train': train_testvalid['train'],
84
+ 'test': train_testvalid['test']})
85
+ return out_dataset
86
+
87
+
88
+ def main(args):
89
+ # load dataset
90
+ bn_dataset = get_bengali_dataset(validation_split=False)
91
+ def load_bn_dataset(split):
92
+ return bn_dataset[split]
93
+
94
+ # dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
95
+ dataset = load_bn_dataset(split=args.split)
96
+
97
+ # for testing: only process the first two examples as a test
98
+ # dataset = dataset.select(range(10))
99
+
100
+ # load processor
101
+ feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
102
+ sampling_rate = feature_extractor.sampling_rate
103
+
104
+ # resample audio
105
+ dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
106
+
107
+ # load eval pipeline
108
+ asr = pipeline("automatic-speech-recognition", model=args.model_id, device=0)
109
+
110
+ # map function to decode audio
111
+ def map_to_pred(batch):
112
+ prediction = asr(
113
+ batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
114
+ )
115
+
116
+ batch["prediction"] = prediction["text"]
117
+ batch["target"] = normalize_text(batch["sentence"])
118
+ return batch
119
+
120
+ # run inference on all examples
121
+ result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
122
+
123
+ # compute and log_results
124
+ # do not change function below
125
+ log_results(result, args)
126
+
127
+
128
+ if __name__ == "__main__":
129
+ parser = argparse.ArgumentParser()
130
+
131
+ parser.add_argument(
132
+ "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
133
+ )
134
+ parser.add_argument(
135
+ "--dataset",
136
+ type=str,
137
+ required=True,
138
+ help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
139
+ )
140
+ parser.add_argument(
141
+ "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
142
+ )
143
+ parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
144
+ parser.add_argument(
145
+ "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
146
+ )
147
+ parser.add_argument(
148
+ "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
149
+ )
150
+ parser.add_argument(
151
+ "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
152
+ )
153
+ args = parser.parse_args()
154
+
155
+ main(args)
eval_run.sh ADDED
@@ -0,0 +1,6 @@
+ python eval.py \
+ --model_id="Tahsin-Mayeesha/wav2vec2-bn-300m" \
+ --dataset="openslr_SLR53" \
+ --config="bn" \
+ --split="test" \
+ --log_outputs
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "processor_class": "Wav2Vec2ProcessorWithLM",
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
run.sh ADDED
@@ -0,0 +1,36 @@
+ python run_speech_recognition_ctc.py \
+ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
+ --dataset_name="openslr_SLR53" \
+ --train_split_name="train" \
+ --preprocessing_num_workers="8" \
+ --output_dir="./" \
+ --overwrite_output_dir \
+ --num_train_epochs="70" \
+ --per_device_train_batch_size="16" \
+ --per_device_eval_batch_size="16" \
+ --gradient_accumulation_steps="4" \
+ --learning_rate="7.5e-5" \
+ --warmup_steps="2000" \
+ --length_column_name="input_length" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
+ --min_duration_in_seconds="0.1" \
+ --save_steps="500" \
+ --eval_steps="500" \
+ --logging_steps="100" \
+ --layerdrop="0.0" \
+ --activation_dropout="0.1" \
+ --save_total_limit="3" \
+ --freeze_feature_encoder \
+ --feat_proj_dropout="0.0" \
+ --mask_time_prob="0.75" \
+ --mask_time_length="10" \
+ --mask_feature_prob="0.25" \
+ --mask_feature_length="64" \
+ --gradient_checkpointing \
+ --use_auth_token \
+ --fp16 \
+ --group_by_length \
+ --do_train --do_eval \
+ --push_to_hub
run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,756 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
15
+
16
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
17
+
18
+ import functools
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import sys
24
+ import warnings
25
+ from dataclasses import dataclass, field
26
+ from typing import Dict, List, Optional, Union
27
+
28
+ import datasets
29
+ import numpy as np
30
+ import torch
31
+ from datasets import DatasetDict, load_dataset, load_metric
32
+
33
+ import transformers
34
+ from transformers import (
35
+ AutoConfig,
36
+ AutoFeatureExtractor,
37
+ AutoModelForCTC,
38
+ AutoProcessor,
39
+ AutoTokenizer,
40
+ HfArgumentParser,
41
+ Trainer,
42
+ TrainingArguments,
43
+ Wav2Vec2Processor,
44
+ set_seed,
45
+ )
46
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
47
+ from transformers.utils import check_min_version
48
+ from transformers.utils.versions import require_version
49
+
50
+
51
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
52
+ check_min_version("4.16.0.dev0")
53
+
54
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
55
+
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+
60
+ def list_field(default=None, metadata=None):
61
+ return field(default_factory=lambda: default, metadata=metadata)
62
+
63
+ def get_bengali_dataset(validation_split=False):
64
+ dataset = load_dataset('openslr', 'SLR53')
65
+
66
+ seed=1242
67
+
68
+ if validation_split:
69
+ train_testvalid = dataset['train'].train_test_split(test_size=0.2, seed=seed)
70
+ # Split the 10% test + valid in half test, half valid
71
+ test_valid = train_testvalid['test'].train_test_split(test_size=0.33, seed=seed)
72
+ # gather everyone if you want to have a single DatasetDict
73
+ out_dataset = DatasetDict({
74
+ 'train': train_testvalid['train'],
75
+ 'test': test_valid['test'],
76
+ 'valid': test_valid['train']})
77
+ else:
78
+ train_testvalid = dataset['train'].train_test_split(test_size=0.1, seed=seed)
79
+ out_dataset = DatasetDict({
80
+ 'train': train_testvalid['train'],
81
+ 'test': train_testvalid['test']})
82
+ return out_dataset
83
+
84
+
85
+ @dataclass
86
+ class ModelArguments:
87
+ """
88
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
89
+ """
90
+
91
+ model_name_or_path: str = field(
92
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
93
+ )
94
+ tokenizer_name_or_path: Optional[str] = field(
95
+ default=None,
96
+ metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
97
+ )
98
+ cache_dir: Optional[str] = field(
99
+ default=None,
100
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
101
+ )
102
+ freeze_feature_encoder: bool = field(
103
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
104
+ )
105
+ attention_dropout: float = field(
106
+ default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
107
+ )
108
+ activation_dropout: float = field(
109
+ default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
110
+ )
111
+ feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
112
+ hidden_dropout: float = field(
113
+ default=0.0,
114
+ metadata={
115
+ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
116
+ },
117
+ )
118
+ final_dropout: float = field(
119
+ default=0.0,
120
+ metadata={"help": "The dropout probability for the final projection layer."},
121
+ )
122
+ mask_time_prob: float = field(
123
+ default=0.05,
124
+ metadata={
125
+ "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
126
+ "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
127
+ "vectors will be masked along the time axis."
128
+ },
129
+ )
130
+ mask_time_length: int = field(
131
+ default=10,
132
+ metadata={"help": "Length of vector span to mask along the time axis."},
133
+ )
134
+ mask_feature_prob: float = field(
135
+ default=0.0,
136
+ metadata={
137
+ "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
138
+ "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
139
+ },
140
+ )
141
+ mask_feature_length: int = field(
142
+ default=10,
143
+ metadata={"help": "Length of vector span to mask along the feature axis."},
144
+ )
145
+ layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
146
+ ctc_loss_reduction: Optional[str] = field(
147
+ default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
148
+ )
149
+
150
+
151
+ @dataclass
152
+ class DataTrainingArguments:
153
+ """
154
+ Arguments pertaining to what data we are going to input our model for training and eval.
155
+
156
+ Using `HfArgumentParser` we can turn this class
157
+ into argparse arguments to be able to specify them on
158
+ the command line.
159
+ """
160
+
161
+ dataset_name: str = field(
162
+ metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
163
+ )
164
+ dataset_config_name: str = field(
165
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
166
+ )
167
+ train_split_name: str = field(
168
+ default="train+validation",
169
+ metadata={
170
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
171
+ },
172
+ )
173
+ eval_split_name: str = field(
174
+ default="test",
175
+ metadata={
176
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
177
+ },
178
+ )
179
+ audio_column_name: str = field(
180
+ default="audio",
181
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
182
+ )
183
+ text_column_name: str = field(
184
+ default="text",
185
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
186
+ )
187
+ overwrite_cache: bool = field(
188
+ default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
189
+ )
190
+ preprocessing_num_workers: Optional[int] = field(
191
+ default=None,
192
+ metadata={"help": "The number of processes to use for the preprocessing."},
193
+ )
194
+ max_train_samples: Optional[int] = field(
195
+ default=None,
196
+ metadata={
197
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
198
+ "value if set."
199
+ },
200
+ )
201
+ max_eval_samples: Optional[int] = field(
202
+ default=None,
203
+ metadata={
204
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
205
+ "value if set."
206
+ },
207
+ )
208
+ chars_to_ignore: Optional[List[str]] = list_field(
209
+ default=None,
210
+ metadata={"help": "A list of characters to remove from the transcripts."},
211
+ )
212
+ eval_metrics: List[str] = list_field(
213
+ default=["wer"],
214
+ metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
215
+ )
216
+ max_duration_in_seconds: float = field(
217
+ default=20.0,
218
+ metadata={
219
+ "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
220
+ },
221
+ )
222
+ min_duration_in_seconds: float = field(
223
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
224
+ )
225
+ preprocessing_only: bool = field(
226
+ default=False,
227
+ metadata={
228
+ "help": "Whether to only do data preprocessing and skip training. "
229
+ "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
230
+ "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
231
+ "so that the cached datasets can consequently be loaded in distributed training"
232
+ },
233
+ )
234
+ use_auth_token: bool = field(
235
+ default=False,
236
+ metadata={
237
+ "help": "If :obj:`True`, will use the token generated when running"
238
+ ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
239
+ },
240
+ )
241
+ unk_token: str = field(
242
+ default="[UNK]",
243
+ metadata={"help": "The unk token for the tokenizer"},
244
+ )
245
+ pad_token: str = field(
246
+ default="[PAD]",
247
+ metadata={"help": "The padding token for the tokenizer"},
248
+ )
249
+ word_delimiter_token: str = field(
250
+ default="|",
251
+ metadata={"help": "The word delimiter token for the tokenizer"},
252
+ )
253
+ phoneme_language: Optional[str] = field(
254
+ default=None,
255
+ metadata={
256
+ "help": "The target language that should be used be"
257
+ " passed to the tokenizer for tokenization. Note that"
258
+ " this is only relevant if the model classifies the"
259
+ " input audio to a sequence of phoneme sequences."
260
+ },
261
+ )
262
+
263
+
264
+ @dataclass
265
+ class DataCollatorCTCWithPadding:
266
+ """
267
+ Data collator that will dynamically pad the inputs received.
268
+ Args:
269
+ processor (:class:`~transformers.AutoProcessor`)
270
+ The processor used for proccessing the data.
271
+ padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
272
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
273
+ among:
274
+ * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
275
+ sequence if provided).
276
+ * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
277
+ maximum acceptable input length for the model if that argument is not provided.
278
+ * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
279
+ different lengths).
280
+ max_length (:obj:`int`, `optional`):
281
+ Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
282
+ max_length_labels (:obj:`int`, `optional`):
283
+ Maximum length of the ``labels`` returned list and optionally padding length (see above).
284
+ pad_to_multiple_of (:obj:`int`, `optional`):
285
+ If set will pad the sequence to a multiple of the provided value.
286
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
287
+ 7.5 (Volta).
288
+ """
289
+
290
+ processor: AutoProcessor
291
+ padding: Union[bool, str] = "longest"
292
+ pad_to_multiple_of: Optional[int] = None
293
+ pad_to_multiple_of_labels: Optional[int] = None
294
+
295
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
296
+ # split inputs and labels since they have to be of different lenghts and need
297
+ # different padding methods
298
+ input_features = [{"input_values": feature["input_values"]} for feature in features]
299
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
300
+
301
+ batch = self.processor.pad(
302
+ input_features,
303
+ padding=self.padding,
304
+ pad_to_multiple_of=self.pad_to_multiple_of,
305
+ return_tensors="pt",
306
+ )
307
+
308
+ with self.processor.as_target_processor():
309
+ labels_batch = self.processor.pad(
310
+ label_features,
311
+ padding=self.padding,
312
+ pad_to_multiple_of=self.pad_to_multiple_of_labels,
313
+ return_tensors="pt",
314
+ )
315
+
316
+ # replace padding with -100 to ignore loss correctly
317
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
318
+
319
+ batch["labels"] = labels
320
+
321
+ return batch
322
+
323
+
324
+ def create_vocabulary_from_data(
325
+ datasets: DatasetDict,
326
+ word_delimiter_token: Optional[str] = None,
327
+ unk_token: Optional[str] = None,
328
+ pad_token: Optional[str] = None,
329
+ ):
330
+ # Given training and test labels create vocabulary
331
+ def extract_all_chars(batch):
332
+ all_text = " ".join(batch["target_text"])
333
+ vocab = list(set(all_text))
334
+ return {"vocab": [vocab], "all_text": [all_text]}
335
+
336
+ vocabs = datasets.map(
337
+ extract_all_chars,
338
+ batched=True,
339
+ batch_size=-1,
340
+ keep_in_memory=True,
341
+ remove_columns=datasets["train"].column_names,
342
+ )
343
+
344
+ # take union of all unique characters in each dataset
345
+ vocab_set = functools.reduce(
346
+ lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
347
+ )
348
+
349
+ vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
350
+
351
+ # replace white space with delimiter token
352
+ if word_delimiter_token is not None:
353
+ vocab_dict[word_delimiter_token] = vocab_dict[" "]
354
+ del vocab_dict[" "]
355
+
356
+ # add unk and pad token
357
+ if unk_token is not None:
358
+ vocab_dict[unk_token] = len(vocab_dict)
359
+
360
+ if pad_token is not None:
361
+ vocab_dict[pad_token] = len(vocab_dict)
362
+
363
+ return vocab_dict
364
+
365
+
366
+ def main():
367
+ # See all possible arguments in src/transformers/training_args.py
368
+ # or by passing the --help flag to this script.
369
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
370
+
371
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
372
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
373
+ # If we pass only one argument to the script and it's the path to a json file,
374
+ # let's parse it to get our arguments.
375
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
376
+ else:
377
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
378
+
379
+ # Setup logging
380
+ logging.basicConfig(
381
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
382
+ datefmt="%m/%d/%Y %H:%M:%S",
383
+ handlers=[logging.StreamHandler(sys.stdout)],
384
+ )
385
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
386
+
387
+ # Detecting last checkpoint.
388
+ last_checkpoint = None
389
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
390
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
391
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
392
+ raise ValueError(
393
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
394
+ "Use --overwrite_output_dir to overcome."
395
+ )
396
+ elif last_checkpoint is not None:
397
+ logger.info(
398
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
399
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
400
+ )
401
+
402
+ # Log on each process the small summary:
403
+ logger.warning(
404
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
405
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
406
+ )
407
+ # Set the verbosity to info of the Transformers logger (on main process only):
408
+ if is_main_process(training_args.local_rank):
409
+ transformers.utils.logging.set_verbosity_info()
410
+ logger.info("Training/evaluation parameters %s", training_args)
411
+
412
+ # Set seed before initializing model.
413
+ set_seed(training_args.seed)
414
+
415
+ # 1. First, let's load the dataset
416
+ bn_dataset = get_bengali_dataset(validation_split=False)
417
+ def load_bn_dataset(split):
418
+ return bn_dataset[split]
419
+
420
+ raw_datasets = DatasetDict()
421
+
422
+ if training_args.do_train:
423
+ raw_datasets["train"] = load_bn_dataset(
424
+ split=data_args.train_split_name
425
+ )
426
+
427
+ if data_args.audio_column_name not in raw_datasets["train"].column_names:
428
+ raise ValueError(
429
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
430
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
431
+ f"{', '.join(raw_datasets['train'].column_names)}."
432
+ )
433
+
434
+ if data_args.text_column_name not in raw_datasets["train"].column_names:
435
+ raise ValueError(
436
+ f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
437
+ "Make sure to set `--text_column_name` to the correct text column - one of "
438
+ f"{', '.join(raw_datasets['train'].column_names)}."
439
+ )
440
+
441
+ if data_args.max_train_samples is not None:
442
+ raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
443
+
444
+ if training_args.do_eval:
445
+ raw_datasets["eval"] = load_bn_dataset(
446
+ split=data_args.eval_split_name
447
+ )
448
+
449
+ if data_args.max_eval_samples is not None:
450
+ raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
451
+
452
+ # 2. We remove some special characters from the datasets
453
+ # that make training complicated and do not help in transcribing the speech
454
+ # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
455
+ # that could be easily picked up by the model
456
+ chars_to_ignore_regex = (
457
+ f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
458
+ )
459
+ text_column_name = data_args.text_column_name
460
+
461
+ def remove_special_characters(batch):
462
+ if chars_to_ignore_regex is not None:
463
+ batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
464
+ else:
465
+ batch["target_text"] = batch[text_column_name].lower() + " "
466
+ return batch
467
+
468
+ with training_args.main_process_first(desc="dataset map special characters removal"):
469
+ raw_datasets = raw_datasets.map(
470
+ remove_special_characters,
471
+ remove_columns=[text_column_name],
472
+ desc="remove special characters from datasets",
473
+ )
474
+
475
+ # save special tokens for tokenizer
476
+ word_delimiter_token = data_args.word_delimiter_token
477
+ unk_token = data_args.unk_token
478
+ pad_token = data_args.pad_token
479
+
480
+ # 3. Next, let's load the config as we might need it to create
481
+ # the tokenizer
482
+ # load config
483
+ config = AutoConfig.from_pretrained(
484
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
485
+ )
486
+
487
+ # 4. Next, if no tokenizer file is defined,
488
+ # we create the vocabulary of the model by extracting all unique characters from
489
+ # the training and evaluation datasets
490
+ # We need to make sure that only first rank saves vocabulary
491
+ # make sure all processes wait until vocab is created
492
+ tokenizer_name_or_path = model_args.tokenizer_name_or_path
493
+ tokenizer_kwargs = {}
494
+ if tokenizer_name_or_path is None:
495
+ # save vocab in training output dir
496
+ tokenizer_name_or_path = training_args.output_dir
497
+
498
+ vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
499
+
500
+ with training_args.main_process_first():
501
+ if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
502
+ os.remove(vocab_file)
503
+
504
+ with training_args.main_process_first(desc="dataset map vocabulary creation"):
505
+ if not os.path.isfile(vocab_file):
506
+ os.makedirs(tokenizer_name_or_path, exist_ok=True)
507
+ vocab_dict = create_vocabulary_from_data(
508
+ raw_datasets,
509
+ word_delimiter_token=word_delimiter_token,
510
+ unk_token=unk_token,
511
+ pad_token=pad_token,
512
+ )
513
+
514
+ # save vocab dict to be loaded into tokenizer
515
+ with open(vocab_file, "w") as file:
516
+ json.dump(vocab_dict, file)
517
+
518
+ # if tokenizer has just been created
519
+ # it is defined by `tokenizer_class` if present in config else by `model_type`
520
+ tokenizer_kwargs = {
521
+ "config": config if config.tokenizer_class is not None else None,
522
+ "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
523
+ "unk_token": unk_token,
524
+ "pad_token": pad_token,
525
+ "word_delimiter_token": word_delimiter_token,
526
+ }
527
+
528
+ # 5. Now we can instantiate the feature extractor, tokenizer and model
529
+ # Note for distributed training, the .from_pretrained methods guarantee that only
530
+ # one local process can concurrently download model & vocab.
531
+
532
+ # load feature_extractor and tokenizer
533
+ tokenizer = AutoTokenizer.from_pretrained(
534
+ tokenizer_name_or_path,
535
+ use_auth_token=data_args.use_auth_token,
536
+ **tokenizer_kwargs,
537
+ )
538
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
539
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
540
+ )
541
+
542
+ # adapt config
543
+ config.update(
544
+ {
545
+ "feat_proj_dropout": model_args.feat_proj_dropout,
546
+ "attention_dropout": model_args.attention_dropout,
547
+ "hidden_dropout": model_args.hidden_dropout,
548
+ "final_dropout": model_args.final_dropout,
549
+ "mask_time_prob": model_args.mask_time_prob,
550
+ "mask_time_length": model_args.mask_time_length,
551
+ "mask_feature_prob": model_args.mask_feature_prob,
552
+ "mask_feature_length": model_args.mask_feature_length,
553
+ "gradient_checkpointing": training_args.gradient_checkpointing,
554
+ "layerdrop": model_args.layerdrop,
555
+ "ctc_loss_reduction": model_args.ctc_loss_reduction,
556
+ "pad_token_id": tokenizer.pad_token_id,
557
+ "vocab_size": len(tokenizer),
558
+ "activation_dropout": model_args.activation_dropout,
559
+ }
560
+ )
561
+
562
+ # create model
563
+ model = AutoModelForCTC.from_pretrained(
564
+ model_args.model_name_or_path,
565
+ cache_dir=model_args.cache_dir,
566
+ config=config,
567
+ use_auth_token=data_args.use_auth_token,
568
+ )
569
+
570
+ # freeze encoder
571
+ if model_args.freeze_feature_encoder:
572
+ model.freeze_feature_encoder()
573
+
574
+ # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
575
+ # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
576
+ # so that we just need to set the correct target sampling rate and normalize the input
577
+ # via the `feature_extractor`
578
+
579
+ # make sure that dataset decodes audio with correct sampling rate
580
+ dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
581
+ if dataset_sampling_rate != feature_extractor.sampling_rate:
582
+ raw_datasets = raw_datasets.cast_column(
583
+ data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
584
+ )
585
+
586
+ # derive max & min input length for sample rate & max duration
587
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
588
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
589
+ audio_column_name = data_args.audio_column_name
590
+ num_workers = data_args.preprocessing_num_workers
591
+
592
+ # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
593
+ phoneme_language = data_args.phoneme_language
594
+
595
+ # Preprocessing the datasets.
596
+ # We need to read the audio files as arrays and tokenize the targets.
597
+ def prepare_dataset(batch):
598
+ # load audio
599
+ sample = batch[audio_column_name]
600
+
601
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
602
+ batch["input_values"] = inputs.input_values[0]
603
+ batch["input_length"] = len(batch["input_values"])
604
+
605
+ # encode targets
606
+ additional_kwargs = {}
607
+ if phoneme_language is not None:
608
+ additional_kwargs["phonemizer_lang"] = phoneme_language
609
+
610
+ batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
611
+ return batch
612
+
613
+ with training_args.main_process_first(desc="dataset map preprocessing"):
614
+ vectorized_datasets = raw_datasets.map(
615
+ prepare_dataset,
616
+ remove_columns=next(iter(raw_datasets.values())).column_names,
617
+ num_proc=num_workers,
618
+ desc="preprocess datasets",
619
+ )
620
+
621
+ def is_audio_in_length_range(length):
622
+ return length > min_input_length and length < max_input_length
623
+
624
+ # filter data that is shorter than min_input_length
625
+ vectorized_datasets = vectorized_datasets.filter(
626
+ is_audio_in_length_range,
627
+ num_proc=num_workers,
628
+ input_columns=["input_length"],
629
+ )
630
+
631
+ # 7. Next, we can prepare the training.
632
+ # Let's use word error rate (WER) as our evaluation metric,
633
+ # instantiate a data collator and the trainer
634
+
635
+ # Define evaluation metrics during training, *i.e.* word error rate, character error rate
636
+ eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
637
+
638
+ # for large datasets it is advised to run the preprocessing on a
639
+ # single machine first with ``args.preprocessing_only`` since there will mostly likely
640
+ # be a timeout when running the script in distributed mode.
641
+ # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
642
+ # cached dataset
643
+ if data_args.preprocessing_only:
644
+ logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
645
+ return
646
+
647
+ def compute_metrics(pred):
648
+ pred_logits = pred.predictions
649
+ pred_ids = np.argmax(pred_logits, axis=-1)
650
+
651
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
652
+
653
+ pred_str = tokenizer.batch_decode(pred_ids)
654
+ # we do not want to group tokens when computing the metrics
655
+ label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
656
+
657
+ metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
658
+
659
+ return metrics
660
+
661
+ # Now save everything to be able to create a single processor later
662
+ if is_main_process(training_args.local_rank):
663
+ # save feature extractor, tokenizer and config
664
+ feature_extractor.save_pretrained(training_args.output_dir)
665
+ tokenizer.save_pretrained(training_args.output_dir)
666
+ config.save_pretrained(training_args.output_dir)
667
+
668
+ try:
669
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
670
+ except (OSError, KeyError):
671
+ warnings.warn(
672
+ "Loading a processor from a feature extractor config that does not"
673
+ " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
674
+ " attribute to your `preprocessor_config.json` file to suppress this warning: "
675
+ " `'processor_class': 'Wav2Vec2Processor'`",
676
+ FutureWarning,
677
+ )
678
+ processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
679
+
680
+ # Instantiate custom data collator
681
+ data_collator = DataCollatorCTCWithPadding(processor=processor)
682
+
683
+ # Initialize Trainer
684
+ trainer = Trainer(
685
+ model=model,
686
+ data_collator=data_collator,
687
+ args=training_args,
688
+ compute_metrics=compute_metrics,
689
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
690
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
691
+ tokenizer=feature_extractor,
692
+ )
693
+
694
+ # 8. Finally, we can start training
695
+
696
+ # Training
697
+ if training_args.do_train:
698
+
699
+ # use last checkpoint if exist
700
+ if last_checkpoint is not None:
701
+ checkpoint = last_checkpoint
702
+ elif os.path.isdir(model_args.model_name_or_path):
703
+ checkpoint = model_args.model_name_or_path
704
+ else:
705
+ checkpoint = None
706
+
707
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
708
+ trainer.save_model()
709
+
710
+ metrics = train_result.metrics
711
+ max_train_samples = (
712
+ data_args.max_train_samples
713
+ if data_args.max_train_samples is not None
714
+ else len(vectorized_datasets["train"])
715
+ )
716
+ metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
717
+
718
+ trainer.log_metrics("train", metrics)
719
+ trainer.save_metrics("train", metrics)
720
+ trainer.save_state()
721
+
722
+ # Evaluation
723
+ results = {}
724
+ if training_args.do_eval:
725
+ logger.info("*** Evaluate ***")
726
+ metrics = trainer.evaluate()
727
+ max_eval_samples = (
728
+ data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
729
+ )
730
+ metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
731
+
732
+ trainer.log_metrics("eval", metrics)
733
+ trainer.save_metrics("eval", metrics)
734
+
735
+ # Write model card and (optionally) push to hub
736
+ config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
737
+ kwargs = {
738
+ "finetuned_from": model_args.model_name_or_path,
739
+ "tasks": "speech-recognition",
740
+ "tags": ["automatic-speech-recognition", data_args.dataset_name, "robust-speech-event"],
741
+ "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
742
+ "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
743
+ }
744
+ if "common_voice" in data_args.dataset_name:
745
+ kwargs["language"] = config_name
746
+
747
+ if training_args.push_to_hub:
748
+ trainer.push_to_hub(**kwargs)
749
+ else:
750
+ trainer.create_model_card(**kwargs)
751
+
752
+ return results
753
+
754
+
755
+ if __name__ == "__main__":
756
+ main()
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "Tahsin-Mayeesha/wav2vec2-bn-300m", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b0fae35f4aef706954b510d11e5212ddb2788564a7d419f32a6700e28d313e7
+ size 2991
vocab.json ADDED
@@ -0,0 +1 @@
+ {"_": 1, "a": 2, "b": 3, "c": 4, "d": 5, "e": 6, "f": 7, "g": 8, "h": 9, "i": 10, "j": 11, "k": 12, "l": 13, "m": 14, "n": 15, "o": 16, "p": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "“": 27, "”": 28, "œ": 29, "।": 30, "ঁ": 31, "ং": 32, "ঃ": 33, "অ": 34, "আ": 35, "ই": 36, "ঈ": 37, "উ": 38, "ঊ": 39, "ঋ": 40, "এ": 41, "ঐ": 42, "ও": 43, "ঔ": 44, "ক": 45, "খ": 46, "গ": 47, "ঘ": 48, "ঙ": 49, "চ": 50, "ছ": 51, "জ": 52, "ঝ": 53, "ঞ": 54, "ট": 55, "ঠ": 56, "ড": 57, "ঢ": 58, "ণ": 59, "ত": 60, "থ": 61, "দ": 62, "ধ": 63, "ন": 64, "প": 65, "ফ": 66, "ব": 67, "ভ": 68, "ম": 69, "য": 70, "র": 71, "ল": 72, "শ": 73, "ষ": 74, "স": 75, "হ": 76, "়": 77, "া": 78, "ি": 79, "ী": 80, "ু": 81, "ূ": 82, "ৃ": 83, "ে": 84, "ৈ": 85, "ো": 86, "ৌ": 87, "্": 88, "ৎ": 89, "ৗ": 90, "ড়": 91, "ঢ়": 92, "য়": 93, "০": 94, "১": 95, "২": 96, "৩": 97, "৪": 98, "৫": 99, "৬": 100, "৭": 101, "৮": 102, "৯": 103, "ৰ": 104, "‌": 105, "‍": 106, "‎": 107, "|": 0, "[UNK]": 108, "[PAD]": 109}