jcmc committed
Commit 7abf99e
Parent: 4bb51bd

Upload lm-boosted decoder

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 5gram.arpa filter=lfs diff=lfs merge=lfs -text
+ 5gram_correct.arpa filter=lfs diff=lfs merge=lfs -text
+ text.txt filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/n-gram-checkpoint.ipynb ADDED
@@ -0,0 +1,144 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "2e612e3a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "target_lang=\"ga-IE\" # change to your target lang"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "id": "7fe65d91",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using custom data configuration ga-pl-lang1=ga,lang2=pl\n",
+ "Reusing dataset opus_dgt (/workspace/cache/hf/datasets/opus_dgt/ga-pl-lang1=ga,lang2=pl/0.0.0/a4db75cea3712eb5d4384f0539db82abf897c6b6da5e5e81693e8fd201efc346)\n"
+ ]
+ }
+ ],
+ "source": [
+ "from datasets import load_dataset\n",
+ "\n",
+ "# dataset = load_dataset(\"mozilla-foundation/common_voice_8_0\", \n",
+ "# \"ga-IE\", \n",
+ "# split=\"train\", \n",
+ "# use_auth_token = True)\n",
+ "\n",
+ "dataset = load_dataset(\"opus_dgt\", lang1=\"ga\", lang2=\"pl\", split = 'train')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "id": "03e44482",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ga_txt = [i['ga'] for i in dataset['translation']]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "id": "c828175b",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "NameError",
+ "evalue": "name 'ga_text' is not defined",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m<ipython-input-46-c49fc06c912c>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mga_text\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
+ "\u001b[0;31mNameError\u001b[0m: name 'ga_text' is not defined"
+ ]
+ }
+ ],
+ "source": [
+ "ga_text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "cdb72a9d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chars_to_ignore_regex = '[,?.!\\-\\;\\:\"“%‘”�—’…–]' # change to the ignored characters of your fine-tuned model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "4823df21",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import re\n",
+ "\n",
+ "def extract_text(batch):\n",
+ " text = batch[\"sentence\"]\n",
+ " batch[\"text_clean\"] = re.sub(chars_to_ignore_regex, \"\", text.lower())\n",
+ " return batch"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "d2b27f75",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Loading cached processed dataset at /workspace/cache/hf/datasets/mozilla-foundation___common_voice/ga-IE/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8/cache-f9f6dd3027923e5a.arrow\n"
+ ]
+ }
+ ],
+ "source": [
+ "dataset = dataset.map(extract_text, remove_columns=dataset.column_names)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "91244c41",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
5gram.arpa ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07a5b3058d8cca7e1a61aa31b7ab0907fdb6ff7a104dfef12d8d470b2513c391
+ size 376008972
5gram_correct.arpa ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8d79210ff27e6e122fa9af6411f860d85ca20ecac3d76bb4d716341b467e7a8
+ size 376008991
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "x", "y", "\u00e1", "\u00e9", "\u00ed", "\u00f3", "\u00fa", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1238bb0e9f91c4250009bb8496c4732cad0da6f6a9fbaa945cb5782af4a4bbdc
+ size 173705975
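language_model/5gram.bin is the same model in KenLM's binary format, which loads far faster than the 376 MB ARPA text file. A sketch of sanity-checking it with the kenlm Python bindings (an assumption — the bindings are not used in the notebook itself):

```python
import kenlm

# Load the binary 5-gram and score a phrase that occurs in the corpus;
# scores are log10 probabilities, so values nearer 0 are more likely.
model = kenlm.Model("language_model/5gram.bin")
print(model.order)                                   # expected: 5
print(model.score("maidir le", bos=True, eos=True))
```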
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
The diff for this file is too large to render. See raw diff
 
n-gram.ipynb ADDED
@@ -0,0 +1,481 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "id": "831245a1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "2ac8a30f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "target_lang=\"ga-IE\" # change to your target lang"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 101,
+ "id": "15710167",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using custom data configuration ga-pl-lang1=ga,lang2=pl\n",
+ "Reusing dataset opus_dgt (/workspace/cache/hf/datasets/opus_dgt/ga-pl-lang1=ga,lang2=pl/0.0.0/a4db75cea3712eb5d4384f0539db82abf897c6b6da5e5e81693e8fd201efc346)\n"
+ ]
+ }
+ ],
+ "source": [
+ "from datasets import load_dataset\n",
+ "\n",
+ "# dataset = load_dataset(\"mozilla-foundation/common_voice_8_0\", \n",
+ "# \"ga-IE\", \n",
+ "# split=\"train\", \n",
+ "# use_auth_token = True)\n",
+ "\n",
+ "dataset = load_dataset(\"opus_dgt\", lang1=\"ga\", lang2=\"pl\", split = 'train')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 102,
+ "id": "fb20d4de",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# ga_txt = [i['ga'] for i in dataset['translation']]\n",
+ "# ga_txt = pd.Series(ga_txt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 103,
+ "id": "eeca1851",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chars_to_ignore_regex = '[,?.!\\-\\;\\:\"“%‘”�—’…–]' # change to the ignored characters of your fine-tuned model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 107,
+ "id": "4df93c9c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import re\n",
+ "\n",
+ "def extract_text(batch):\n",
+ " text = batch[\"translation\"]\n",
+ " ga_text = text['ga']\n",
+ " batch[\"text\"] = re.sub(chars_to_ignore_regex, \"\", ga_text.lower())\n",
+ " return batch"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 108,
+ "id": "84bedd13",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "d9a11f167bb94faa8e9f6a511407acb4",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "0ex [00:00, ?ex/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "dataset = dataset.map(extract_text, remove_columns=dataset.column_names)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 112,
+ "id": "31cb3c6b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "342d92a5d9c44c59bcb5dca143ced3b6",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Pushing dataset shards to the dataset hub: 0%| | 0/1 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "dataset.push_to_hub(f\"{target_lang}_opus_dgt_train\", split=\"train\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "70952673",
+ "metadata": {},
+ "source": [
+ "## N-gram KenLM"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 116,
+ "id": "51756959",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "38d3c229117f4e60a7778f974ac609de",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading: 0%| | 0.00/1.60k [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using custom data configuration jcmc--ga-IE_opus_dgt_train-aa318da91f5f84f6\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Downloading and preparing dataset opus_dgt/ga-pl (download: 12.11 MiB, generated: 28.99 MiB, post-processed: Unknown size, total: 41.11 MiB) to /workspace/cache/hf/datasets/parquet/jcmc--ga-IE_opus_dgt_train-aa318da91f5f84f6/0.0.0/1638526fd0e8d960534e2155dc54fdff8dce73851f21f031d2fb9c2cf757c121...\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "e5e07f18549b443ead74991a9b338593",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/1 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "0e83c78fa1bc43f19a56b623c92a64a4",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading: 0%| | 0.00/12.7M [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "06649f5cd3324eb49a1bd09b68aa23b6",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/1 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Dataset parquet downloaded and prepared to /workspace/cache/hf/datasets/parquet/jcmc--ga-IE_opus_dgt_train-aa318da91f5f84f6/0.0.0/1638526fd0e8d960534e2155dc54fdff8dce73851f21f031d2fb9c2cf757c121. Subsequent calls will reuse this data.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from datasets import load_dataset\n",
+ "\n",
+ "dataset = load_dataset(\"jcmc/ga-IE_opus_dgt_train\", split=\"train\")\n",
+ "\n",
+ "with open(\"text.txt\", \"w\") as file:\n",
+ " file.write(\" \".join(dataset[\"text\"]))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 118,
+ "id": "77eb3a41",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "=== 1/5 Counting and sorting n-grams ===\n",
+ "Reading /workspace/wav2vec-1b-cv8-ir/text.txt\n",
+ "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
+ "****************************************************************************************************\n",
+ "Unigram tokens 4378228 types 70781\n",
+ "=== 2/5 Calculating and sorting adjusted counts ===\n",
+ "Chain sizes: 1:849372 2:14475680768 3:27141902336 4:43427041280 5:63331106816\n",
+ "Statistics:\n",
+ "1 70780 D1=0.684187 D2=1.0538 D3+=1.37643\n",
+ "2 652306 D1=0.766205 D2=1.12085 D3+=1.39031\n",
+ "3 1669326 D1=0.84217 D2=1.20654 D3+=1.39941\n",
+ "4 2514789 D1=0.896214 D2=1.29731 D3+=1.47431\n",
+ "5 3053088 D1=0.794858 D2=1.47897 D3+=1.5117\n",
+ "Memory estimate for binary LM:\n",
+ "type MB\n",
+ "probing 164 assuming -p 1.5\n",
+ "probing 192 assuming -r models -p 1.5\n",
+ "trie 77 without quantization\n",
+ "trie 42 assuming -q 8 -b 8 quantization \n",
+ "trie 69 assuming -a 22 array pointer compression\n",
+ "trie 34 assuming -a 22 -q 8 -b 8 array pointer compression and quantization\n",
+ "=== 3/5 Calculating and sorting initial probabilities ===\n",
+ "Chain sizes: 1:849360 2:10436896 3:33386520 4:60354936 5:85486464\n",
+ "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
+ "####################################################################################################\n",
+ "=== 4/5 Calculating and writing order-interpolated probabilities ===\n",
+ "Chain sizes: 1:849360 2:10436896 3:33386520 4:60354936 5:85486464\n",
+ "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
+ "####################################################################################################\n",
+ "=== 5/5 Writing ARPA model ===\n",
+ "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
+ "****************************************************************************************************\n",
+ "Name:lmplz\tVmPeak:145097728 kB\tVmRSS:51788 kB\tRSSMax:25679020 kB\tuser:9.15304\tsys:14.1178\tCPU:23.2708\treal:20.9339\n"
+ ]
+ }
+ ],
+ "source": [
+ "!../kenlm/build/bin/lmplz -o 5 <\"text.txt\" > \"5gram.arpa\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 122,
+ "id": "0e043b87",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(\"5gram.arpa\", \"r\") as read_file, open(\"5gram_correct.arpa\", \"w\") as write_file:\n",
+ " has_added_eos = False\n",
+ " for line in read_file:\n",
+ " if not has_added_eos and \"ngram 1=\" in line:\n",
+ " count=line.strip().split(\"=\")[-1]\n",
+ " write_file.write(line.replace(f\"{count}\", f\"{int(count)+1}\"))\n",
+ " elif not has_added_eos and \"<s>\" in line:\n",
+ " write_file.write(line)\n",
+ " write_file.write(line.replace(\"<s>\", \"</s>\"))\n",
+ " has_added_eos = True\n",
+ " else:\n",
+ " write_file.write(line)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 123,
+ "id": "d106c7d1",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\\data\\\n",
+ "ngram 1=70781\n",
+ "ngram 2=652306\n",
+ "ngram 3=1669326\n",
+ "ngram 4=2514789\n",
+ "ngram 5=3053088\n",
+ "\n",
+ "\\1-grams:\n",
+ "-5.8501472\t<unk>\t0\n",
+ "0\t<s>\t-0.11565505\n",
+ "0\t</s>\t-0.11565505\n",
+ "-5.4088216\tmiontuairisc\t-0.20133564\n",
+ "-4.6517477\tcheartaitheach\t-0.24842946\n",
+ "-2.1893916\tmaidir\t-1.7147961\n",
+ "-2.1071756\tle\t-0.7007309\n",
+ "-4.156014\tcoinbhinsiún\t-0.31064242\n",
+ "-1.8876181\tar\t-0.9045828\n",
+ "-4.62287\tdhlínse\t-0.24268326\n",
+ "-1.6051095\tagus\t-0.8729715\n",
+ "-4.1465816\taithint\t-0.21693327\n"
+ ]
+ }
+ ],
+ "source": [
+ "!head -20 5gram_correct.arpa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 124,
+ "id": "85ef4c43",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from transformers import AutoProcessor\n",
+ "\n",
+ "processor = AutoProcessor.from_pretrained(\"./\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 125,
+ "id": "cb2a2768",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vocab_dict = processor.tokenizer.get_vocab()\n",
+ "sorted_vocab_dict = {k.lower(): v for k, v in sorted(vocab_dict.items(), key=lambda item: item[1])}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 126,
+ "id": "d19eee6f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Found entries of length > 1 in alphabet. This is unusual unless style is BPE, but the alphabet was not recognized as BPE type. Is this correct?\n",
+ "Unigrams and labels don't seem to agree.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from pyctcdecode import build_ctcdecoder\n",
+ "\n",
+ "decoder = build_ctcdecoder(\n",
+ " labels=list(sorted_vocab_dict.keys()),\n",
+ " kenlm_model_path=\"5gram_correct.arpa\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 127,
+ "id": "4e8031a9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from transformers import Wav2Vec2ProcessorWithLM\n",
+ "\n",
+ "processor_with_lm = Wav2Vec2ProcessorWithLM(\n",
+ " feature_extractor=processor.feature_extractor,\n",
+ " tokenizer=processor.tokenizer,\n",
+ " decoder=decoder\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 128,
+ "id": "6f32faf4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/workspace/wav2vec-1b-cv8-ir/./ is already a clone of https://huggingface.co/jcmc/wav2vec-1b-cv8-ir. Make sure you pull the latest changes with `repo.git_pull()`.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from huggingface_hub import Repository\n",
+ "\n",
+ "repo = Repository(local_dir=\"./\", clone_from=\"jcmc/wav2vec-1b-cv8-ir\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 129,
+ "id": "a7e91068",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'/workspace/wav2vec-1b-cv8-ir'"
+ ]
+ },
+ "execution_count": 129,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "pwd"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0a1de336",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "processor_with_lm.save_pretrained(\"xls-r-300m-sv\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
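Taken together, the notebook builds the text corpus, trains the 5-gram with KenLM's lmplz, patches the missing </s> entry into the ARPA file, wraps the result into a Wav2Vec2ProcessorWithLM, and saves it. A minimal inference sketch for the uploaded decoder, assuming this commit has been pushed to jcmc/wav2vec-1b-cv8-ir and using a silent placeholder waveform in place of real audio:

```python
import numpy as np
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

model = AutoModelForCTC.from_pretrained("jcmc/wav2vec-1b-cv8-ir")
processor = Wav2Vec2ProcessorWithLM.from_pretrained("jcmc/wav2vec-1b-cv8-ir")

# Placeholder input: one second of silence at the expected 16 kHz.
audio = np.zeros(16_000, dtype=np.float32)
inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode runs pyctcdecode's beam search against the 5-gram KenLM
# instead of plain argmax decoding over the CTC logits.
print(processor.batch_decode(logits.numpy()).text)
```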
preprocessor_config.json CHANGED
@@ -4,6 +4,7 @@
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0,
+ "processor_class": "Wav2Vec2ProcessorWithLM",
  "return_attention_mask": true,
  "sampling_rate": 16000
  }
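The added "processor_class" entry is what routes AutoProcessor to the LM-boosted class, so downstream users need no code changes. A quick check (a sketch, again assuming the pushed repository):

```python
from transformers import AutoProcessor

# "processor_class": "Wav2Vec2ProcessorWithLM" in the configs makes
# AutoProcessor resolve to the LM-boosted processor automatically.
processor = AutoProcessor.from_pretrained("jcmc/wav2vec-1b-cv8-ir")
print(type(processor).__name__)  # expected: Wav2Vec2ProcessorWithLM
```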
special_tokens_map.json CHANGED
@@ -1 +1 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
text.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab790db751ff5b9ddc7575cb8fe62d57c8a4b8fdda4950b2f9a17aad421bfffc
+ size 29862507
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}