Upload ukrainian-treebank-lm.py
Browse files- ukrainian-treebank-lm.py +56 -49
ukrainian-treebank-lm.py
CHANGED
@@ -13,7 +13,6 @@
|
|
13 |
# limitations under the License.
|
14 |
"""Ukrainian Treebank (Language Modeling) - dataset by Universal Dependencies preprocessed for language modeling"""
|
15 |
|
16 |
-
|
17 |
import itertools
|
18 |
import operator
|
19 |
import conllu
|
@@ -55,22 +54,18 @@ _URLS = {
|
|
55 |
class UkrainianTreebankLMConfig(datasets.BuilderConfig):
|
56 |
"""BuilderConfig for UkrainianTreebankLM"""
|
57 |
|
58 |
-
def __init__(self, *args, split_by: str = 'paragraph',
|
59 |
"""BuilderConfig for UkrainianTreebankLM.
|
60 |
Args:
|
61 |
*args: keyword arguments forwarded to super.
|
62 |
split_by: one of 'document', 'paragraph', 'sentence'
|
63 |
-
split: one of 'all', 'train', 'dev', 'test'
|
64 |
**kwargs: keyword arguments forwarded to super.
|
65 |
"""
|
66 |
super(UkrainianTreebankLMConfig, self).__init__(*args, **kwargs)
|
67 |
assert split_by in ['document', 'paragraph', 'sentence'], \
|
68 |
"split_by should be one of 'document', 'paragraph', 'sentence'"
|
69 |
-
assert split in ['all', 'train', 'dev', 'test'], \
|
70 |
-
"split should be one of 'all', 'train', 'dev', 'test'"
|
71 |
|
72 |
self.split_by = split_by
|
73 |
-
self.split = split
|
74 |
|
75 |
|
76 |
class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
|
@@ -80,16 +75,14 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
|
|
80 |
BUILDER_CONFIG_CLASS = UkrainianTreebankLMConfig
|
81 |
BUILDER_CONFIGS = [
|
82 |
UkrainianTreebankLMConfig(
|
83 |
-
name=
|
84 |
-
description=f"
|
85 |
split_by=split_by,
|
86 |
-
split=split
|
87 |
)
|
88 |
-
for
|
89 |
-
['document', 'paragraph', 'sentence'])
|
90 |
]
|
91 |
|
92 |
-
DEFAULT_CONFIG_NAME = "
|
93 |
|
94 |
def _info(self):
|
95 |
if self.config.split_by == "document":
|
@@ -131,51 +124,66 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
|
|
131 |
)
|
132 |
|
133 |
def _split_generators(self, dl_manager):
|
134 |
-
|
135 |
-
urls = list(_URLS.values())
|
136 |
-
else:
|
137 |
-
urls = [_URLS[self.config.split]]
|
138 |
-
filepaths = dl_manager.download_and_extract(urls)
|
139 |
return [
|
140 |
datasets.SplitGenerator(
|
141 |
-
name=
|
142 |
-
gen_kwargs={
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
148 |
]
|
149 |
|
150 |
def _parse(self, filepath):
|
151 |
with open(filepath, 'r') as f:
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
171 |
|
172 |
def _generate_examples_sentence(self, sentence_iterator):
|
173 |
for sentence in sentence_iterator:
|
174 |
yield sentence["sentence_id"], sentence
|
175 |
|
176 |
def _generate_examples_paragraph(self, sentence_iterator):
|
177 |
-
|
178 |
-
for key, group in itertools.groupby(sorted_sentences,
|
179 |
operator.itemgetter("paragraph_id")):
|
180 |
try:
|
181 |
sentence = next(group)
|
@@ -192,8 +200,7 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
|
|
192 |
}
|
193 |
|
194 |
def _generate_examples_document(self, sentence_iterator):
|
195 |
-
|
196 |
-
for key, group in itertools.groupby(sorted_sentences,
|
197 |
operator.itemgetter("document_id")):
|
198 |
try:
|
199 |
sentence = next(group)
|
@@ -208,8 +215,8 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
|
|
208 |
"document_title": sentence["document_title"],
|
209 |
}
|
210 |
|
211 |
-
def _generate_examples(self,
|
212 |
-
sentence_iterator = self.
|
213 |
|
214 |
if self.config.split_by == "document":
|
215 |
yield from self._generate_examples_document(sentence_iterator)
|
|
|
13 |
# limitations under the License.
|
14 |
"""Ukrainian Treebank (Language Modeling) - dataset by Universal Dependencies preprocessed for language modeling"""
|
15 |
|
|
|
16 |
import itertools
|
17 |
import operator
|
18 |
import conllu
|
|
|
54 |
class UkrainianTreebankLMConfig(datasets.BuilderConfig):
|
55 |
"""BuilderConfig for UkrainianTreebankLM"""
|
56 |
|
57 |
+
def __init__(self, *args, split_by: str = 'paragraph', **kwargs):
    """BuilderConfig for UkrainianTreebankLM.

    Args:
        *args: positional arguments forwarded to super.
        split_by: grouping unit for generated examples; one of
            'document', 'paragraph', 'sentence'.
        **kwargs: keyword arguments forwarded to super.

    Raises:
        ValueError: if ``split_by`` is not a recognised grouping unit.
    """
    super().__init__(*args, **kwargs)
    # Validate with a real exception: `assert` is stripped under
    # `python -O`, which would silently let a bad config through.
    if split_by not in ('document', 'paragraph', 'sentence'):
        raise ValueError(
            "split_by should be one of 'document', 'paragraph', 'sentence'")

    self.split_by = split_by
|
|
|
69 |
|
70 |
|
71 |
class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
|
|
|
75 |
BUILDER_CONFIG_CLASS = UkrainianTreebankLMConfig
# One config per grouping unit: an example is a whole document, a whole
# paragraph, or a single sentence depending on the selected config name.
BUILDER_CONFIGS = [
    UkrainianTreebankLMConfig(
        name=split_by,
        description=f"Ukrainian Treebank split by {split_by}",
        split_by=split_by,
    )
    for split_by in ['document', 'paragraph', 'sentence']
]

# Paragraph-level grouping is the default granularity.
DEFAULT_CONFIG_NAME = "paragraph"
|
86 |
|
87 |
def _info(self):
|
88 |
if self.config.split_by == "document":
|
|
|
124 |
)
|
125 |
|
126 |
def _split_generators(self, dl_manager):
    """Download every CoNLL-U file and declare one generator per split.

    All three URLs are fetched up front; each SplitGenerator receives the
    local path of its file plus the split name (used to namespace ids).
    """
    filepaths = dl_manager.download_and_extract(_URLS)
    # (datasets split name, key into _URLS/filepaths and split label)
    split_plan = [
        (datasets.Split.TRAIN, "train"),
        (datasets.Split.VALIDATION, "dev"),
        (datasets.Split.TEST, "test"),
    ]
    return [
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={
                "filepath": filepaths[key],
                "split": key,
            },
        )
        for split_name, key in split_plan
    ]
|
151 |
|
152 |
def _parse(self, filepath):
    """Lazily yield `conllu` sentence objects parsed from *filepath*.

    The treebank is Ukrainian UTF-8 text, so the file is decoded
    explicitly instead of relying on the platform default encoding
    (e.g. cp1251/cp1252 on some Windows locales would garble the data).
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        # "annotation_gap" is a non-standard metadata key in this
        # treebank; register an identity parser so conllu preserves it
        # rather than rejecting the line.
        yield from conllu.parse_incr(
            f,
            metadata_parsers={"annotation_gap": lambda key, value: (key, value)})
|
155 |
+
|
156 |
+
def _sentence_iterator(self, filepath, split):
    """Yield one flat dict per sentence, carrying document/paragraph context.

    In CoNLL-U, "newdoc id", "doc_title" and "newpar id" appear only on the
    first sentence of a document/paragraph, so the most recently seen values
    are carried forward to every following sentence. Document and paragraph
    ids are suffixed with the split name to keep them unique across splits.
    """
    current_doc_id = None
    current_doc_title = None
    current_par_id = None
    gap_number = 0  # reserved for annotation-gap handling (TODO below)
    for sent in self._parse(filepath):
        # TODO how do we handle annotation gaps?
        # if "annotation_gap" in sent.metadata:
        #     document_id = f"unknown_{gap_number}"
        #     document_title = f"unknown_{gap_number}"
        #     paragraph_id = f"unknown_{gap_number}"
        #     gap_number += 1

        meta = sent.metadata
        current_doc_id = meta.get("newdoc id", current_doc_id)
        current_doc_title = meta.get("doc_title", current_doc_title)
        current_par_id = meta.get("newpar id", current_par_id)

        yield {
            "text": meta["text"],
            "document_id": current_doc_id + '_' + split,
            "document_title": current_doc_title,
            "paragraph_id": current_par_id + '_' + split,
            "sentence_id": meta["sent_id"],  # sentence id is always present
        }
|
180 |
|
181 |
def _generate_examples_sentence(self, sentence_iterator):
    """Emit each sentence record unchanged, keyed by its sentence_id."""
    yield from ((record["sentence_id"], record) for record in sentence_iterator)
|
184 |
|
185 |
def _generate_examples_paragraph(self, sentence_iterator):
|
186 |
+
for key, group in itertools.groupby(sentence_iterator,
|
|
|
187 |
operator.itemgetter("paragraph_id")):
|
188 |
try:
|
189 |
sentence = next(group)
|
|
|
200 |
}
|
201 |
|
202 |
def _generate_examples_document(self, sentence_iterator):
|
203 |
+
for key, group in itertools.groupby(sentence_iterator,
|
|
|
204 |
operator.itemgetter("document_id")):
|
205 |
try:
|
206 |
sentence = next(group)
|
|
|
215 |
"document_title": sentence["document_title"],
|
216 |
}
|
217 |
|
218 |
+
def _generate_examples(self, filepath, split):
|
219 |
+
sentence_iterator = self._sentence_iterator(filepath, split)
|
220 |
|
221 |
if self.config.split_by == "document":
|
222 |
yield from self._generate_examples_document(sentence_iterator)
|