system HF staff commited on
Commit
dc37319
1 Parent(s): 8e40c94

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. eraser_multi_rc.py +26 -27
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  languages:
3
  - en
4
  paperswithcode_id: null
1
  ---
2
+ pretty_name: Eraser Multi Rc
3
  languages:
4
  - en
5
  paperswithcode_id: null
eraser_multi_rc.py CHANGED
@@ -18,7 +18,6 @@
18
 
19
 
20
  import json
21
- import os
22
 
23
  import datasets
24
 
@@ -73,46 +72,46 @@ class EraserMultiRc(datasets.GeneratorBasedBuilder):
73
  def _split_generators(self, dl_manager):
74
  """Returns SplitGenerators."""
75
 
76
- dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
77
- data_dir = os.path.join(dl_dir, "multirc")
78
  return [
79
  datasets.SplitGenerator(
80
  name=datasets.Split.TRAIN,
81
  # These kwargs will be passed to _generate_examples
82
- gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "train.jsonl")},
83
  ),
84
  datasets.SplitGenerator(
85
  name=datasets.Split.VALIDATION,
86
  # These kwargs will be passed to _generate_examples
87
- gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "val.jsonl")},
88
  ),
89
  datasets.SplitGenerator(
90
  name=datasets.Split.TEST,
91
  # These kwargs will be passed to _generate_examples
92
- gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "test.jsonl")},
93
  ),
94
  ]
95
 
96
- def _generate_examples(self, data_dir, filepath):
97
  """Yields examples."""
98
 
99
- multirc_dir = os.path.join(data_dir, "docs")
100
- with open(filepath, encoding="utf-8") as f:
101
- for line in f:
102
- row = json.loads(line)
103
- evidences = []
104
-
105
- for evidence in row["evidences"][0]:
106
- docid = evidence["docid"]
107
- evidences.append(evidence["text"])
108
-
109
- passage_file = os.path.join(multirc_dir, docid)
110
- with open(passage_file, encoding="utf-8") as f1:
111
- passage_text = f1.read()
112
-
113
- yield row["annotation_id"], {
114
- "passage": passage_text,
115
- "query_and_answer": row["query"],
116
- "label": row["classification"],
117
- "evidences": evidences,
118
- }
 
18
 
19
 
20
  import json
 
21
 
22
  import datasets
23
 
def _split_generators(self, dl_manager):
    """Returns SplitGenerators.

    Downloads the dataset archive once and hands each split a fresh
    streaming iterator over the archive members together with the
    archive-relative path of that split's .jsonl file.
    """
    archive = dl_manager.download(_DOWNLOAD_URL)

    # (split name, jsonl path inside the archive) for every split.
    split_files = [
        (datasets.Split.TRAIN, "multirc/train.jsonl"),
        (datasets.Split.VALIDATION, "multirc/val.jsonl"),
        (datasets.Split.TEST, "multirc/test.jsonl"),
    ]

    # Each split gets its own iter_archive() iterator: the archive is
    # consumed in a single forward pass per split.
    return [
        datasets.SplitGenerator(
            name=split_name,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={
                "files": dl_manager.iter_archive(archive),
                "split_file": jsonl_path,
            },
        )
        for split_name, jsonl_path in split_files
    ]
93
 
94
+ def _generate_examples(self, files, split_file):
95
  """Yields examples."""
96
 
97
+ multirc_dir = "multirc/docs"
98
+ docs = {}
99
+ for path, f in files:
100
+ docs[path] = f.read().decode("utf-8")
101
+ for line in docs[split_file].splitlines():
102
+ row = json.loads(line)
103
+ evidences = []
104
+
105
+ for evidence in row["evidences"][0]:
106
+ docid = evidence["docid"]
107
+ evidences.append(evidence["text"])
108
+
109
+ passage_file = "/".join([multirc_dir, docid])
110
+ passage_text = docs[passage_file]
111
+
112
+ yield row["annotation_id"], {
113
+ "passage": passage_text,
114
+ "query_and_answer": row["query"],
115
+ "label": row["classification"],
116
+ "evidences": evidences,
117
+ }