Goader committed on
Commit
ed1443d
1 Parent(s): b31c225

Upload ukrainian-treebank-lm.py

Browse files
Files changed (1) hide show
  1. ukrainian-treebank-lm.py +56 -49
ukrainian-treebank-lm.py CHANGED
@@ -13,7 +13,6 @@
13
  # limitations under the License.
14
  """Ukrainian Treebank (Language Modeling) - dataset by Universal Dependencies preprocessed for language modeling"""
15
 
16
-
17
  import itertools
18
  import operator
19
  import conllu
@@ -55,22 +54,18 @@ _URLS = {
55
  class UkrainianTreebankLMConfig(datasets.BuilderConfig):
56
  """BuilderConfig for UkrainianTreebankLM"""
57
 
58
- def __init__(self, *args, split_by: str = 'paragraph', split: str = 'all', **kwargs):
59
  """BuilderConfig for UkrainianTreebankLM.
60
  Args:
61
  *args: keyword arguments forwarded to super.
62
  split_by: one of 'document', 'paragraph', 'sentence'
63
- split: one of 'all', 'train', 'dev', 'test'
64
  **kwargs: keyword arguments forwarded to super.
65
  """
66
  super(UkrainianTreebankLMConfig, self).__init__(*args, **kwargs)
67
  assert split_by in ['document', 'paragraph', 'sentence'], \
68
  "split_by should be one of 'document', 'paragraph', 'sentence'"
69
- assert split in ['all', 'train', 'dev', 'test'], \
70
- "split should be one of 'all', 'train', 'dev', 'test'"
71
 
72
  self.split_by = split_by
73
- self.split = split
74
 
75
 
76
  class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
@@ -80,16 +75,14 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
80
  BUILDER_CONFIG_CLASS = UkrainianTreebankLMConfig
81
  BUILDER_CONFIGS = [
82
  UkrainianTreebankLMConfig(
83
- name=split + '_' + split_by,
84
- description=f"{split.capitalize()} {'parts' if split == 'all' else 'part'} split by {split_by}",
85
  split_by=split_by,
86
- split=split
87
  )
88
- for split, split_by in itertools.product(['all', 'train', 'dev', 'test'],
89
- ['document', 'paragraph', 'sentence'])
90
  ]
91
 
92
- DEFAULT_CONFIG_NAME = "all_paragraph"
93
 
94
  def _info(self):
95
  if self.config.split_by == "document":
@@ -131,51 +124,66 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
131
  )
132
 
133
  def _split_generators(self, dl_manager):
134
- if self.config.split == "all":
135
- urls = list(_URLS.values())
136
- else:
137
- urls = [_URLS[self.config.split]]
138
- filepaths = dl_manager.download_and_extract(urls)
139
  return [
140
  datasets.SplitGenerator(
141
- name=split_name,
142
- gen_kwargs={"filepaths": filepaths},
143
- )
144
- for split_name in [datasets.Split.TRAIN,
145
- datasets.Split.VALIDATION,
146
- datasets.Split.TEST,
147
- datasets.Split.ALL]
 
 
 
 
 
 
 
 
 
 
 
 
 
148
  ]
149
 
150
  def _parse(self, filepath):
151
  with open(filepath, 'r') as f:
152
- document_id = None
153
- document_title = None
154
- paragraph_id = None
155
- for sentence in conllu.parse_incr(f):
156
- document_id = sentence.metadata.get("newdoc id", document_id)
157
- document_title = sentence.metadata.get("doc title", document_title)
158
- paragraph_id = sentence.metadata.get("newpar id", paragraph_id)
159
-
160
- yield {
161
- "text": sentence.metadata["text"],
162
- "document_id": document_id,
163
- "document_title": document_title,
164
- "paragraph_id": paragraph_id,
165
- "sentence_id": sentence.metadata["sent_id"] # sentence id is always present
166
- }
167
-
168
- def _parse_filepaths(self, filepaths):
169
- for filepath in filepaths:
170
- yield from self._parse(filepath)
 
 
 
 
 
 
 
171
 
172
  def _generate_examples_sentence(self, sentence_iterator):
173
  for sentence in sentence_iterator:
174
  yield sentence["sentence_id"], sentence
175
 
176
  def _generate_examples_paragraph(self, sentence_iterator):
177
- sorted_sentences = sorted(sentence_iterator, key=operator.itemgetter("sentence_id"))
178
- for key, group in itertools.groupby(sorted_sentences,
179
  operator.itemgetter("paragraph_id")):
180
  try:
181
  sentence = next(group)
@@ -192,8 +200,7 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
192
  }
193
 
194
  def _generate_examples_document(self, sentence_iterator):
195
- sorted_sentences = sorted(sentence_iterator, key=operator.itemgetter("sentence_id"))
196
- for key, group in itertools.groupby(sorted_sentences,
197
  operator.itemgetter("document_id")):
198
  try:
199
  sentence = next(group)
@@ -208,8 +215,8 @@ class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
208
  "document_title": sentence["document_title"],
209
  }
210
 
211
- def _generate_examples(self, filepaths):
212
- sentence_iterator = self._parse_filepaths(filepaths)
213
 
214
  if self.config.split_by == "document":
215
  yield from self._generate_examples_document(sentence_iterator)
 
13
  # limitations under the License.
14
  """Ukrainian Treebank (Language Modeling) - dataset by Universal Dependencies preprocessed for language modeling"""
15
 
 
16
  import itertools
17
  import operator
18
  import conllu
 
54
  class UkrainianTreebankLMConfig(datasets.BuilderConfig):
55
  """BuilderConfig for UkrainianTreebankLM"""
56
 
57
+ def __init__(self, *args, split_by: str = 'paragraph', **kwargs):
58
  """BuilderConfig for UkrainianTreebankLM.
59
  Args:
60
  *args: keyword arguments forwarded to super.
61
  split_by: one of 'document', 'paragraph', 'sentence'
 
62
  **kwargs: keyword arguments forwarded to super.
63
  """
64
  super(UkrainianTreebankLMConfig, self).__init__(*args, **kwargs)
65
  assert split_by in ['document', 'paragraph', 'sentence'], \
66
  "split_by should be one of 'document', 'paragraph', 'sentence'"
 
 
67
 
68
  self.split_by = split_by
 
69
 
70
 
71
  class UkrainianTreebankLM(datasets.GeneratorBasedBuilder):
 
75
  BUILDER_CONFIG_CLASS = UkrainianTreebankLMConfig
76
  BUILDER_CONFIGS = [
77
  UkrainianTreebankLMConfig(
78
+ name=split_by,
79
+ description=f"Ukrainian Treebank split by {split_by}",
80
  split_by=split_by,
 
81
  )
82
+ for split_by in ['document', 'paragraph', 'sentence']
 
83
  ]
84
 
85
+ DEFAULT_CONFIG_NAME = "paragraph"
86
 
87
  def _info(self):
88
  if self.config.split_by == "document":
 
124
  )
125
 
126
  def _split_generators(self, dl_manager):
127
+ filepaths = dl_manager.download_and_extract(_URLS)
 
 
 
 
128
  return [
129
  datasets.SplitGenerator(
130
+ name=datasets.Split.TRAIN,
131
+ gen_kwargs={
132
+ "filepath": filepaths['train'],
133
+ "split": "train"
134
+ },
135
+ ),
136
+ datasets.SplitGenerator(
137
+ name=datasets.Split.VALIDATION,
138
+ gen_kwargs={
139
+ "filepath": filepaths['dev'],
140
+ "split": "dev"
141
+ },
142
+ ),
143
+ datasets.SplitGenerator(
144
+ name=datasets.Split.TEST,
145
+ gen_kwargs={
146
+ "filepath": filepaths['test'],
147
+ "split": "test"
148
+ },
149
+ ),
150
  ]
151
 
152
  def _parse(self, filepath):
153
  with open(filepath, 'r') as f:
154
+ yield from conllu.parse_incr(f, metadata_parsers={"annotation_gap": lambda key, value: (key, value)})
155
+
156
+ def _sentence_iterator(self, filepath, split):
157
+ document_id = None
158
+ document_title = None
159
+ paragraph_id = None
160
+ gap_number = 0
161
+ for sentence in self._parse(filepath):
162
+ # TODO how do we handle annotation gaps?
163
+ # if "annotation_gap" in sentence.metadata:
164
+ # document_id = f"unknown_{gap_number}"
165
+ # document_title = f"unknown_{gap_number}"
166
+ # paragraph_id = f"unknown_{gap_number}"
167
+ # gap_number += 1
168
+
169
+ document_id = sentence.metadata.get("newdoc id", document_id)
170
+ document_title = sentence.metadata.get("doc_title", document_title)
171
+ paragraph_id = sentence.metadata.get("newpar id", paragraph_id)
172
+
173
+ yield {
174
+ "text": sentence.metadata["text"],
175
+ "document_id": document_id + '_' + split,
176
+ "document_title": document_title,
177
+ "paragraph_id": paragraph_id + '_' + split,
178
+ "sentence_id": sentence.metadata["sent_id"] # sentence id is always present
179
+ }
180
 
181
  def _generate_examples_sentence(self, sentence_iterator):
182
  for sentence in sentence_iterator:
183
  yield sentence["sentence_id"], sentence
184
 
185
  def _generate_examples_paragraph(self, sentence_iterator):
186
+ for key, group in itertools.groupby(sentence_iterator,
 
187
  operator.itemgetter("paragraph_id")):
188
  try:
189
  sentence = next(group)
 
200
  }
201
 
202
  def _generate_examples_document(self, sentence_iterator):
203
+ for key, group in itertools.groupby(sentence_iterator,
 
204
  operator.itemgetter("document_id")):
205
  try:
206
  sentence = next(group)
 
215
  "document_title": sentence["document_title"],
216
  }
217
 
218
+ def _generate_examples(self, filepath, split):
219
+ sentence_iterator = self._sentence_iterator(filepath, split)
220
 
221
  if self.config.split_by == "document":
222
  yield from self._generate_examples_document(sentence_iterator)