parquet-converter committed on
Commit 032cc97
1 Parent(s): 61a3f98

Update parquet files

README.md DELETED
@@ -1,3 +0,0 @@
- ---
- license: cc-by-nc-sa-4.0
- ---
 
 
 
 
cc_gigafida.py DELETED
@@ -1,195 +0,0 @@
- """(A publicly available subsample of) a reference corpus of Slovene texts."""
-
-
- import glob
- import logging
- import os
- import os.path
- import re
- import xml.etree.ElementTree as ET
- from copy import deepcopy
-
- import datasets
-
- XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
-
-
- def namespace(element):
-     # https://stackoverflow.com/a/12946675
-     m = re.match(r'\{.*\}', element.tag)
-     return m.group(0) if m else ''
-
-
- _CITATION = """\
- @misc{ccGigafida,
-     title = {Written corpus {ccGigafida} 1.0},
-     author = {Logar, Nata{\v s}a and Erjavec, Toma{\v z} and Krek, Simon and Gr{\v c}ar, Miha and Holozan, Peter},
-     url = {http://hdl.handle.net/11356/1035},
-     note = {Slovenian language resource repository {CLARIN}.{SI}},
-     copyright = {Creative Commons - Attribution-{NonCommercial}-{ShareAlike} 4.0 International ({CC} {BY}-{NC}-{SA} 4.0)},
-     issn = {2820-4042},
-     year = {2013}
- }
- """
-
- _DESCRIPTION = """\
- The ccGigafida corpus contains a subsample of the Gigafida corpus. The Gigafida corpus is an extensive collection of
- Slovene text of various genres, from daily newspapers, magazines, all kinds of books (fiction, non-fiction, textbooks),
- web pages, transcriptions of parliamentary debates and similar.
- """
-
- _HOMEPAGE = "http://eng.slovenscina.eu/korpusi/proste-zbirke"
-
- _LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
-
- _URLS = {
-     "ccGigafida": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1035/ccGigafidaV1_0.zip"
- }
-
-
- class CcGigafida(datasets.GeneratorBasedBuilder):
-     """(A publicly available subsample of) a reference corpus of Slovene texts."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id_doc": datasets.Value("string"),
-                 "doc_title": datasets.Value("string"),
-                 "authors": datasets.Sequence(datasets.Value("string")),
-                 "publish_date": datasets.Value("string"),
-                 "publisher": datasets.Value("string"),
-                 "genres": datasets.Sequence(datasets.Value("string")),
-                 "doc_tokenized": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))),
-                 "doc_string": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
-                 "id_sents": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # Allow user to specify path to the full Gigafida directory: `load_dataset(..., data_dir=...)`
-         if dl_manager.manual_dir is not None:
-             data_dir = dl_manager.manual_dir
-         else:
-             urls = _URLS["ccGigafida"]
-             data_dir = dl_manager.download_and_extract(urls)
-             data_dir = os.path.join(data_dir, "ccGigafidaV1_0")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"data_dir": data_dir}
-             )
-         ]
-
-     def _generate_examples(self, data_dir):
-         GENRE_MAPPING = {
-             "SSJ.T": "tisk", "SSJ.T.K": "tisk/knjižno", "SSJ.T.K.L": "tisk/knjižno/leposlovno",
-             "SSJ.T.K.S": "tisk/knjižno/strokovno", "SSJ.T.P": "tisk/periodično", "SSJ.T.P.C": "tisk/periodično/časopis",
-             "SSJ.T.P.R": "tisk/periodično/revija", "SSJ.T.D": "tisk/drugo", "SSJ.I": "internet"
-         }
-         # genres are prefixed by "ssj:" in Gigafida 2.0
-         for genre, description in deepcopy(GENRE_MAPPING).items():
-             GENRE_MAPPING[f"ssj:{genre}"] = description
-
-         # Recursively search for xml files in subdirectories
-         all_files = [os.path.join(data_dir, file_name)
-                      for file_name in glob.glob(os.path.join(data_dir, "**", "*.xml"), recursive=True)
-                      if os.path.isfile(os.path.join(data_dir, file_name))]
-         all_files = sorted(all_files)  # fix order
-
-         for _idx_file, file_path in enumerate(all_files):
-             curr_doc = ET.parse(file_path)
-             root = curr_doc.getroot()
-             NAMESPACE = namespace(root)
-             id_doc = root.attrib[f"{XML_NAMESPACE}id"]
-
-             # Document metadata
-             bibl_el = root.find(f".//{NAMESPACE}bibl")
-             doc_title = bibl_el.find(f"{NAMESPACE}title").text.strip()
-             authors = list(map(lambda _tag: _tag.text.strip(), bibl_el.findall(f"{NAMESPACE}author")))
-             publish_date = bibl_el.find(f"{NAMESPACE}date").text.strip()
-             publisher = bibl_el.find(f"{NAMESPACE}publisher").text.strip()
-
-             category_tags = root.findall(f".//{NAMESPACE}catRef")
-             genres = []
-             for _tag in category_tags:
-                 # in ccGigafida, the genres are noted with a "#" prefix
-                 __tag = _tag.attrib["target"][1:] if _tag.attrib["target"].startswith("#") else _tag.attrib["target"]
-                 mapped_tag = GENRE_MAPPING.get(__tag, None)
-                 # In addition to the genre of the document, there is sometimes a category assigned by the deduplication tool (dedup:nodup)
-                 if mapped_tag is None:
-                     continue
-
-                 genres.append(mapped_tag)
-
-             # Tokenized and raw string version - raw string version preserves spaces
-             body_tag = root.find(f".//{NAMESPACE}body")
-             tokenized_doc, doc_str = [], []
-             doc_sent_ids = []
-
-             for para_tag in body_tag.findall(f".//{NAMESPACE}p"):
-                 id_para = para_tag.attrib[f"{XML_NAMESPACE}id"]
-                 tokenized_para, para_str = [], []
-                 para_sent_ids = []
-
-                 for _idx_sent, sent_tag in enumerate(para_tag.findall(f".//{NAMESPACE}s")):
-                     # ccGigafida does not have sentence IDs:
-                     # construct ID by taking the paragraph ID + their index in the paragraph
-                     id_sent = sent_tag.attrib.get(f"{XML_NAMESPACE}id", None)
-                     if id_sent is None:
-                         id_sent = f"{id_para}.{_idx_sent}"
-
-                     tokenized_sent, str_sent = [], []
-                     for child_tag in sent_tag:
-                         tag_str = child_tag.tag[len(NAMESPACE):]
-                         if tag_str not in {"w", "S", "c", "pc"}:
-                             logging.warning(f"Found unexpected tag in a sentence: '{tag_str}', skipping it.")
-                             continue
-
-                         # Tag for whitespace in ccGigafida
-                         if tag_str == "S":
-                             str_sent.append(" ")
-
-                         # Tag for:
-                         # - single-letter characters in ccGigafida;
-                         # - whitespace in Gigafida
-                         elif tag_str == "c":
-                             str_sent.append(child_tag.text)
-                             if child_tag.text != " ":
-                                 tokenized_sent.append(child_tag.text)
-
-                         # word or punctuation character
-                         else:
-                             str_sent.append(child_tag.text)
-                             tokenized_sent.append(child_tag.text)
-
-                     str_sent = "".join(str_sent)
-                     tokenized_para.append(tokenized_sent)
-                     para_str.append(str_sent)
-                     para_sent_ids.append(id_sent)
-
-                 tokenized_doc.append(tokenized_para)
-                 doc_str.append(para_str)
-                 doc_sent_ids.append(para_sent_ids)
-
-             yield _idx_file, {
-                 "id_doc": id_doc,
-                 "doc_title": doc_title,
-                 "authors": authors,
-                 "publish_date": publish_date,
-                 "publisher": publisher,
-                 "genres": genres,
-                 "doc_tokenized": tokenized_doc,
-                 "doc_string": doc_str,
-                 "id_sents": doc_sent_ids
-             }
 
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "The ccGigafida corpus contains a subsample of the Gigafida corpus. The Gigafida corpus is an extensive collection of \nSlovene text of various genres, from daily newspapers, magazines, all kinds of books (fiction, non-fiction, textbooks), \nweb pages, transcriptions of parliamentary debates and similar.\n", "citation": "@misc{ccGigafida,\n title = {Written corpus {ccGigafida} 1.0},\n author = {Logar, Nata{\u000b s}a and Erjavec, Toma{\u000b z} and Krek, Simon and Gr{\u000b c}ar, Miha and Holozan, Peter},\n url = {http://hdl.handle.net/11356/1035},\n note = {Slovenian language resource repository {CLARIN}.{SI}},\n copyright = {Creative Commons - Attribution-{NonCommercial}-{ShareAlike} 4.0 International ({CC} {BY}-{NC}-{SA} 4.0)},\n issn = {2820-4042},\n year = {2013}\n}\n", "homepage": "http://eng.slovenscina.eu/korpusi/proste-zbirke", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "doc_title": {"dtype": "string", "id": null, "_type": "Value"}, "authors": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "publish_date": {"dtype": "string", "id": null, "_type": "Value"}, "publisher": {"dtype": "string", "id": null, "_type": "Value"}, "genres": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "doc_tokenized": {"feature": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "doc_string": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "id_sents": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cc_gigafida", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2054478553, "num_examples": 31722, "dataset_name": "cc_gigafida"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1035/ccGigafidaV1_0.zip": {"num_bytes": 954116273, "checksum": "448e284d6f92d54f123e2b7fa433c9d84d060f2db07c589e9b45a356a1f5242e"}}, "download_size": 954116273, "post_processing_size": null, "dataset_size": 2054478553, "size_in_bytes": 3008594826}}
 
 
private/cc_gigafida-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50281efb44aa4b9f9242c5ccae145b0334b2bf6a3c848305e635f93dea440f51
+ size 5139
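
With the loading script removed, the data now ships as a plain Parquet split that the `datasets` library can read with its generic "parquet" builder. A minimal sketch, assuming the LFS-tracked file added in this commit has already been fetched to the same relative local path (the path and printed column names come from this diff and the deleted script's feature definitions, not from a tested setup):

from datasets import load_dataset

# Read the converted Parquet split with the generic "parquet" builder.
# The local path mirrors the file added in this commit and assumes it has
# been downloaded from Git LFS beforehand.
ds = load_dataset(
    "parquet",
    data_files={"train": "private/cc_gigafida-train.parquet"},
    split="train",
)

# Columns mirror the features declared in the deleted cc_gigafida.py:
# id_doc, doc_title, authors, publish_date, publisher, genres,
# doc_tokenized, doc_string, id_sents.
print(ds.column_names)
print(ds[0]["doc_title"])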