Commit 76f1d09, committed by urbija
1 Parent(s): 3e64812

fixed https://github.com/huggingface/datasets/issues/2746
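Once the patch below is applied, a minimal smoke test would be to load one configuration through the datasets library. This is a sketch, not part of the commit; the hub repository id is an assumption and should be replaced with the actual path of this dataset script.

from datasets import load_dataset

# Hypothetical smoke test for the patched loading script.
# The repository id "DFKI-SLT/few-nerd" is an assumption, not taken from
# this commit; "supervised" is one of the configurations defined in _URLs.
ds = load_dataset("DFKI-SLT/few-nerd", "supervised")
print(ds["train"][0])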

Files changed (1):
  1. few-nerd.py (+10 -8)
few-nerd.py CHANGED
@@ -21,6 +21,8 @@ and 4,601,223 tokens. Three benchmark tasks are built, one is supervised: Few-NE
 other two are few-shot: Few-NERD (INTRA) and Few-NERD (INTER).
 """
 
+_LICENSE = "CC BY-SA 4.0"
+
 # the original data files (zip of .txt) can be downloaded from tsinghua cloud
 _URLs = {
     "supervised": "https://cloud.tsinghua.edu.cn/f/09265750ae6340429827/?dl=1",
@@ -241,13 +243,13 @@ class FewNERD(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        urls_to_download = dl_manager.download_and_extract(_URLs)
+        url_to_download = dl_manager.download_and_extract(_URLs[self.config.name])
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "filepath": os.path.join(
-                        urls_to_download[self.config.name],
+                        url_to_download,
                         self.config.name,
                         "train.txt",
                     )
@@ -257,7 +259,7 @@ class FewNERD(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "filepath": os.path.join(
-                        urls_to_download[self.config.name], self.config.name, "dev.txt"
+                        url_to_download, self.config.name, "dev.txt"
                     )
                 },
             ),
@@ -265,7 +267,7 @@ class FewNERD(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "filepath": os.path.join(
-                        urls_to_download[self.config.name], self.config.name, "test.txt"
+                        url_to_download, self.config.name, "test.txt"
                     )
                 },
             ),
@@ -275,10 +277,10 @@ class FewNERD(datasets.GeneratorBasedBuilder):
         # check file type
         assert filepath[-4:] == ".txt"
 
-        num_lines = sum(1 for _ in open(filepath))
+        num_lines = sum(1 for _ in open(filepath, encoding="utf-8"))
         id = 0
 
-        with open(filepath, "r") as f:
+        with open(filepath, "r", encoding="utf-8") as f:
             tokens, ner_tags, fine_ner_tags = [], [], []
             for line in tqdm(f, total=num_lines):
                 line = line.strip().split()
@@ -303,7 +305,7 @@ class FewNERD(datasets.GeneratorBasedBuilder):
                     tokens, ner_tags, fine_ner_tags = [], [], []
                     id += 1
                     yield record["id"], record
-
+
         # take the last sentence
         if tokens:
             record = {
@@ -312,4 +314,4 @@ class FewNERD(datasets.GeneratorBasedBuilder):
                 "ner_tags": ner_tags,
                 "fine_ner_tags": fine_ner_tags,
             }
-            yield record["id"], record
+            yield record["id"], record
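For reference, a minimal self-contained sketch (not the dataset script itself) of the sentence-parsing loop that _generate_examples implements, assuming the standard Few-NERD layout of one whitespace-separated token/tag pair per line with blank lines between sentences. Reading with encoding="utf-8", as the patch does, keeps non-ASCII tokens from failing on platforms whose default encoding is not UTF-8.

import io

# A sketch under the assumptions above, not the committed script:
# each non-empty line is "token<whitespace>tag"; a blank line ends a sentence.
def read_sentences(f):
    tokens, tags = [], []
    for line in f:
        parts = line.strip().split()
        if parts:
            token, tag = parts
            tokens.append(token)
            tags.append(tag)
        elif tokens:
            yield tokens, tags
            tokens, tags = [], []
    if tokens:  # the last sentence may not be followed by a blank line
        yield tokens, tags

# Tiny in-memory example standing in for train.txt / dev.txt / test.txt.
sample = "Berlin\tlocation-GPE\nist\tO\nschön\tO\n\nHello\tO\nworld\tO\n"
for tokens, tags in read_sentences(io.StringIO(sample)):
    print(tokens, tags)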