parquet-converter committed on
Commit 88efa8e
1 parent: 90cbeb0

Update parquet files

Files changed (3)
  1. .gitattributes +0 -27
  2. lasid.py +0 -326
  3. lasid/lasid-train.parquet +3 -0
.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
lasid.py DELETED
@@ -1,326 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- # Copyright 2021 Phonetics and Speech Laboratory, Trinity College, Dublin
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # Lint as: python3
-
- import sys
- from pathlib import Path
-
- import datasets
-
- try:
-     import icu
- except ImportError:
-     sys.exit("ICU not found (hint: pip install pyicu)")
-
- _DESCRIPTION = """\
- Linguistic Atlas and Survey of Irish Dialects, volume 1
- """
-
- _CITATION = """\
- @book{wagner1958linguistic,
- title={Linguistic Atlas and Survey of Irish Dialects: Introduction, 300 maps.},
- author={Wagner, H.},
- number={v. 1},
- year={1958},
- publisher={Dublin Institute for Advanced Studies}
- }
-
- @phdthesis{mckendry1982computer,
- title={Computer-aided contributions to the study of Irish dialects},
- author={McKendry, Eugene},
- year={1982},
- school={Queen's University Belfast}
- }
-
- @article{mckendry1998linguistic,
- title={The Linguistic Atlas and Survey of Irish Dialects (LASID) and the Computer},
- author={McKendry, Eugene},
- journal={Studia Celtica Upsaliensia},
- volume={2},
- pages={345--354},
- year={1998}
- }
- """
-
- _DATA_URL = "https://www3.smo.uhi.ac.uk/oduibhin/oideasra/lasid/lasid.zip"
-
- LASID_ICU = """
- \x07 → ᵏ ;
- \\\t → ᵉ ; # \x09
- \x0e → ᴵ ;
- \x11 → ʰ ;
- \x12 → ⁱ ;
- \x13 → ᵒ ;
- \x14 → ᵒ̤ ;
- \x15 → ʳ ;
- \x16 → ˢ ;
- \x17 → ᶴ ;
- \x18 → ᵗ ;
- \x19 → ᵘ ;
- \x1a → ᵘ̯ ;
- \x1c → ᵛ ;
- \x1d → ʷ ;
- \x1e → ᶾ ;
- \x1f → ᵊ ;
- \# → ᶠ ; # \x23
- \$ → ᵠ ; # \x24
- \% → ᵍ ; # \x25
- \& → ᵞ ; # \x26 ˠ for IPA
- \' → ’ ; # \x27
- \: → ː ; # \x3a
- \< → ⁱ̈ ; # \x3c
- \= → ⁱ̯ ; # \x3d
- \? → ʔ ; # \x3f
- \@ → ʲ ; # \x40
- E → ᴇ ; # \x45
- I → ɪ ; # \x49
- L → ʟ ;
- N → ɴ ;
- R → ʀ ;
- \^ → ᵐ ; # \x5e
- \_ → ǰ ; # crane, 021 # \x5f
- \` → ɛ̀̃ ; # limekiln, 078: \x60
- \| → ⁿ ; # lamb, 055: \x7c
- \~ → ᵑ ; # dreaming, 078; maybe ⁿ̠ ? # \x7e
- \x7f → ᴇ̃ ;
- \x80 → φ ; # ɸ
- \x81 → ü ;
- \x83 → ɛ \u0300 ;
- \x84 → è \u0323 ; # FIXME
- \\\x85 → è̃ ; # this is �, so it needs to be escaped
- \x86 → ũ̜ ; # lamb, 038
- \x87 → u̜ ; # finger-nails, 043
- \x88 → ʈ ; # looks like t̜ : toothache, 033
- \x89 → ᵃ ; # eggs, 066
- \x8a → è ;
- \x8b → ï ;
- \x8c → ɔ̜̃ ; # grandmother, 007
- \x8d → ɔ̜ ;
- \x8e → ɔ̆ ; # before i go, 078
- \x8f → õ̜ ; # as cute, 062
- \x91 → æ ;
- \x92 → o̜ ;
- \x93 → ɖ ;
- \x94 → ö ;
- \x95 → ɑ̜̃ ;
- \x96 → û ; # milking, 067
- \x97 → ɑ \u0323 ; # FIXME (maybe α̩ or ɑ̜ ?)
- \x98 → v̠ ;
- \x99 → t̠ ; # toothache, 021
- \x9a → r̠ ;
- \x9b → ø ;
- \x9c → ɴ̠ ; # sick, 034
- \x9d → ŋ̠ ; # grazing, 002
- \x9e → n̠ ;
- \x9f → l̠ ; # plumage, 068
- \xa4 → k̠ ; # plumage, 068
- \xa5 → g̠ ;
- \xa6 → d̠ ; # wedge, 021
- \xa7 → ŭ ;
- \xa8 → ö̆ ;
- \xa9 → ŏ ;
- \xaa → ĭ ;
- \xab → ɛ̆ ;
- \xac → ĕ ;
- \xad → o̤ ;
- \xae → λ ;
- \xaf → ɑ ; # α in the software
- \xb0 → ɔ ;
- \xb1 → ɑ̆ \u0323 ; # FIXME
- \xb2 → ə ;
- \xb4 → ᵈ ; # tail, 007
- \xb6 → ɑ̆ ; # ᾰ in the software
- \xb7 → ă ;
- \xb8 → λ \u0323 ; # FIXME
- \xb9 → ɛ ;
- \xba → ʃ \u030c ; # calling, 067
- \xbb → š ;
- \xbc → ř ;
- \xbd → ɑ̃ ;
- \xbe → ẽ ; # tied, 88N
- \xc1 → ′ ; # superscript prime
- \xc5 → ᴍ̠ ; # fart, 071
- \xc6 → ã ; # calf, 046
- \xc7 → t \u0323 ; # probably t̞
- \xc8 → λ̯ ; # mane, 067
- \xc9 → o̯ ; # hare, 088
- \xca → Ɫ ; # loaf, 001
- \xcb → ɫ ; # loaf, 003
- \xcc → m̥ ; # awake, 001
- \xcd → ʀ̥ ; # thieving, 003
- \xce → ˈ ;
- \xcf → ˌ ; # cattle, 040
- \xd0 → ð ; # boar, 88N
- \xd1 → s \u0323 ; # FIXME # slime 008
- \xd2 → r \u0323 ; # FIXME # bulls 067
- \xd3 → ɪ̆ ; # suit of clothes 039
- \xd4 → ᴇ̀ ;
- \xd5 → p \u0323 ; # FIXME # castrating 053
- \xd7 → ɪ̃ ; # slime, 007
- \xd8 → ɪ̈ ; # calf 027
- \xdb → o \u0323 ; # FIXME # cow 028
- \xdc → ŋ \u0323 ; # FIXME # tied 078
- \xdd → ö̤ ;
- \xde → k \u0323 ; # FIXME
- \xdf → i \u0323 ; # FIXME # sick 069
- \xe1 → g \u0323 ; # FIXME
- \xe2 → e \u0323 ; # FIXME
- \xe3 → d \u0323 ; # FIXME # agut 052
- \xe4 → õ ; # I shall tie 062
- \xe5 → b \u0323 ; # FIXME # castrating 071
- \xe6 → ɑ̃ \u0323 ; #FIXME # barking 049
- \xe7 → ɑ \u0323 ; # FIXME # slime 008
- \xe8 → ỹ ;
- \xea → λ̃ ;
- \xeb → ü̃ ; # churn-dash, 011
- \xec → ũ ;
- \xed → ɔ̃ ; # cow 074
- \xee → õ̤ ; # barking 055
- \xef → ′ ;
- \xf0 → ″ ;
- \xf1 → ö̤̃ ; # dreaming, 078
- \xf2 → ö̃ ; # sheep shears 074
- \xf3 → ï̃ ; # churn-dash, 034
- \xf4 → ĩ ; # sick 001
- \xf5 → ɣ̃ ; # tied 075
- \xf6 → ɛ̃ ; # tied 067
- \xf7 → n̥ ; # awake, 059
- \xf8 → r̥ ; # slime 002
- \xf9 → ʃ ;
- \xfb → · ; # slime 058
- \xfa → ɣ ;
- \xfc → χ ; # limekiln, 080
- \xfd → ʒ ; # sheep shears 054
- \xfe → ŋ ;
- """
-
- LASID_TITLES_ICU = """
- \xb5 → Á ;
- \xd6 → Í ;
- \x90 → É ;
- \xe0 → Ó ;
- \xe9 → Ú ;
- """
-
- def transliterator_from_rules(name, rules):
-     fromrules = icu.Transliterator.createFromRules(name, rules)
-     icu.Transliterator.registerInstance(fromrules)
-     return icu.Transliterator.createInstance(name)
-
- LASID = transliterator_from_rules('lasid_icu', LASID_ICU)
- TITLES = transliterator_from_rules('lasid_titles', LASID_TITLES_ICU)
-
- def translit_phon(text):
-     # could have been any 8-bit encoding
-     return LASID.transliterate(text.decode('ISO-8859-1').rstrip())
-
- def translit_irish(text):
-     return TITLES.transliterate(text.decode('ISO-8859-1').rstrip())
-
-
- class LasidDataset(datasets.GeneratorBasedBuilder):
-     """Scraper dataset for LASID."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="lasid"),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "english": datasets.Value("string"),
-                 "irish": datasets.Value("string"),
-                 "map_id": datasets.Value("string"),
-                 "place_ids": datasets.Sequence(datasets.Value("string")),
-                 "transcripts": datasets.Sequence(datasets.Value("string")),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             citation=_CITATION
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         dl_path = dl_manager.download_and_extract(_DATA_URL)
-         infile = f"{dl_path}/mapdata.dat"
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "split": "train",
-                     "data_file": infile
-                 },
-             ),
-         ]
-
-     def _generate_examples(
-         self, split, data_file
-     ):
-         """ Yields examples as (key, example) tuples. """
-         data = process_lasid(data_file)
-         _id = 1
-         for map in data.keys():
-             item = data[map]
-             place_ids = list(item["data"])
-             transcripts = [item["data"][a] for a in place_ids]
-
-             yield _id, {
-                 "english": item.get("en", ""),
-                 "irish": item.get("ga", ""),
-                 "map_id": item.get("id", ""),
-                 "place_ids": place_ids,
-                 "transcripts": transcripts
-             }
-             _id += 1
-
-
- def process_lasid(filename):
-     data = {}
-     cur = {}
-     en = ''
-     ga = ''
-     id = ''
-     with open(filename, "rb") as file:
-         for line in file.readlines():
-             if b'{M' in line:
-                 if en and id:
-                     tmp = {}
-                     tmp['en'] = en
-                     tmp['id'] = id
-                     tmp['ga'] = ga
-                     tmp['data'] = cur
-                     data[id] = tmp
-                 text = line.decode('ISO-8859-1').rstrip()
-                 id = text[3:7].strip()
-                 en = text[7:-1].strip()
-                 cur = {}
-             elif b'{F' in line:
-                 raw = translit_irish(line)
-                 ga = raw[3:-1].strip()
-             elif line.decode('ISO-8859-1')[0:1].isnumeric():
-                 pid = line.decode('ISO-8859-1')[0:3]
-                 ptext = translit_phon(line[3:-1])
-                 if ptext[-1] == '*':
-                     ptext = ptext[0:-1]
-                 cur[pid] = ptext.strip()
-     return data
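
For reference, the heart of the deleted script is ICU rule-based transliteration: each legacy 8-bit code used by the LASID software is rewritten as a Unicode phonetic symbol. Below is a minimal sketch of that pattern using a two-rule subset of the LASID_ICU table above; the sample input bytes are hypothetical, and PyICU is assumed to be installed.

```python
import icu  # pip install pyicu

# Two rules taken from the removed LASID_ICU table, for illustration only:
# the control byte 0x07 becomes a superscript k, and ":" becomes a length mark.
SAMPLE_RULES = """
\x07 → ᵏ ;
\\: → ː ;
"""

# Same register-then-create pattern as the deleted transliterator_from_rules() helper.
fromrules = icu.Transliterator.createFromRules("lasid_sample", SAMPLE_RULES)
icu.Transliterator.registerInstance(fromrules)
sample = icu.Transliterator.createInstance("lasid_sample")

raw = b"b\x07o:"  # hypothetical bytes in the 8-bit encoding of mapdata.dat
print(sample.transliterate(raw.decode("ISO-8859-1")))  # expected: bᵏoː
```

The full script applied the complete table to every phonetic field (translit_phon) and the smaller LASID_TITLES_ICU table to the Irish headwords (translit_irish), after decoding each line from ISO-8859-1.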
lasid/lasid-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e082ea4d76b5e2a83286c1e0a68e9bdf9bb8b41480ca6b0f3f7c8147651aa688
+ size 254501
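
With the loading script removed, the train split can be read straight from the Parquet file added above. A minimal sketch, assuming the file is available locally at the path shown in this commit (the dataset can also be loaded by its Hub repository id, which is not shown here):

```python
import pandas as pd
from datasets import load_dataset

# Read the converted split through the generic "parquet" builder.
ds = load_dataset("parquet", data_files={"train": "lasid/lasid-train.parquet"})["train"]
print(ds.column_names)  # ['english', 'irish', 'map_id', 'place_ids', 'transcripts']

# Or read the same file with pandas/pyarrow.
df = pd.read_parquet("lasid/lasid-train.parquet")
print(df.head())
```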