system (HF staff) committed
Commit 1da39cb
Parent: 9e139aa

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2):
  1. README.md +1 -0
  2. id_nergrit_corpus.py +36 -40
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Nergrit Corpus
 annotations_creators:
 - expert-generated
 language_creators:
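
Note: pretty_name is the human-readable title the Hub displays for this dataset card. It is plain card metadata, so it can be read back programmatically; a minimal sketch (not part of this commit), assuming the dataset id "id_nergrit_corpus" and a current huggingface_hub install:

    from huggingface_hub import DatasetCard

    # Load the dataset card and print its display title.
    card = DatasetCard.load("id_nergrit_corpus")
    print(card.data.pretty_name)  # expected: "Nergrit Corpus"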
id_nergrit_corpus.py CHANGED
@@ -15,8 +15,6 @@
 """Nergrit Corpus"""
 
 
-import os
-
 import datasets
 
 
@@ -180,62 +178,60 @@ class IdNergritCorpus(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         my_urls = _URLs[0]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "nergrit-corpus/{}/data/train_corrected.txt".format(self.config.name)
-                    ),
+                    "filepath": f"nergrit-corpus/{self.config.name}/data/train_corrected.txt",
                     "split": "train",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "nergrit-corpus/{}/data/test_corrected.txt".format(self.config.name)
-                    ),
+                    "filepath": f"nergrit-corpus/{self.config.name}/data/test_corrected.txt",
                     "split": "test",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "nergrit-corpus/{}/data/valid_corrected.txt".format(self.config.name)
-                    ),
+                    "filepath": f"nergrit-corpus/{self.config.name}/data/valid_corrected.txt",
                     "split": "dev",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
-        logger.info("⏳ Generating %s examples from = %s", split, filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            ner_tags = []
-            for line in f:
-                splits = line.strip().split()
-                if len(splits) != 2:
-                    if tokens:
-                        assert len(tokens) == len(ner_tags), "word len doesn't match label length"
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "ner_tags": ner_tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        ner_tags = []
-                else:
-                    tokens.append(splits[0])
-                    ner_tags.append(splits[1].rstrip())
-            # last example
-            yield guid, {
-                "id": str(guid),
-                "tokens": tokens,
-                "ner_tags": ner_tags,
-            }
+    def _generate_examples(self, filepath, split, files):
+        for path, f in files:
+            if path == filepath:
+                guid = 0
+                tokens = []
+                ner_tags = []
+                for line in f:
+                    splits = line.decode("utf-8").strip().split()
+                    if len(splits) != 2:
+                        if tokens:
+                            assert len(tokens) == len(ner_tags), "word len doesn't match label length"
+                            yield guid, {
+                                "id": str(guid),
+                                "tokens": tokens,
+                                "ner_tags": ner_tags,
+                            }
+                            guid += 1
+                            tokens = []
+                            ner_tags = []
+                    else:
+                        tokens.append(splits[0])
+                        ner_tags.append(splits[1].rstrip())
+                # last example
+                yield guid, {
+                    "id": str(guid),
+                    "tokens": tokens,
+                    "ner_tags": ner_tags,
+                }
+                break
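
Note: the loader change swaps eager extraction (download_and_extract plus os.path.join) for download plus dl_manager.iter_archive, which yields (path, file-object) pairs sequentially from the tar archive. Members arrive as binary streams, hence line.decode("utf-8") in the new _generate_examples, and the generator breaks out once the requested file has been consumed. This pattern is what lets the dataset also run in streaming mode. A minimal usage sketch (the "ner" config name is an assumption; substitute the config you need):

    from datasets import load_dataset

    # Standard mode: the archive is downloaded once, then examples are
    # parsed out of it by _generate_examples as shown above.
    ds = load_dataset("id_nergrit_corpus", "ner", split="train")
    print(ds[0]["tokens"], ds[0]["ner_tags"])

    # Streaming mode, enabled by the iter_archive pattern: examples are
    # read straight from the archive without extracting it to disk.
    stream = load_dataset("id_nergrit_corpus", "ner", split="train", streaming=True)
    print(next(iter(stream)))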