system HF staff committed on
Commit
75cc922
1 parent: 08755e8

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
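The substantive change: instead of `download_and_extract` plus `os.path.join` file access, the loader now downloads the raw archive and reads its members through `dl_manager.iter_archive`, the pattern introduced in datasets 1.16.0 so that dataset scripts can be streamed without extracting anything to disk. A minimal sketch of that pattern, assuming (as the script below implies) that `_URL` points to an archive containing `data/corpus.jsonl`:

    # Sketch of the new access pattern; not part of the commit itself.
    # dl_manager.download() returns the archive location without extracting it;
    # dl_manager.iter_archive() then yields (path_inside_archive, file_object)
    # pairs, and the file objects produce raw bytes, hence the explicit
    # row.decode("utf-8") added to _generate_examples in the diff below.
    import json

    archive = dl_manager.download(_URL)
    for path, f in dl_manager.iter_archive(archive):
        if path == "data/corpus.jsonl":
            first_doc = json.loads(f.readline().decode("utf-8"))
            break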

Files changed (2)
  1. README.md +1 -0
  2. scifact.py +56 -39
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: SciFact
 languages:
 - en
 paperswithcode_id: null
scifact.py CHANGED
@@ -3,7 +3,6 @@ using evidence from the cited abstracts."""
 
 
 import json
-import os
 
 import datasets
 
@@ -89,14 +88,18 @@ class Scifact(datasets.GeneratorBasedBuilder):
         # TODO(scifact): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
+        archive = dl_manager.download(_URL)
 
         if self.config.name == "corpus":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "data", "corpus.jsonl"), "split": "train"},
+                    gen_kwargs={
+                        "filepath": "data/corpus.jsonl",
+                        "split": "train",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
             ]
         else:
@@ -104,62 +107,76 @@ class Scifact(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_train.jsonl"), "split": "train"},
+                    gen_kwargs={
+                        "filepath": "data/claims_train.jsonl",
+                        "split": "train",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_test.jsonl"), "split": "test"},
+                    gen_kwargs={
+                        "filepath": "data/claims_test.jsonl",
+                        "split": "test",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_dev.jsonl"), "split": "dev"},
+                    gen_kwargs={
+                        "filepath": "data/claims_dev.jsonl",
+                        "split": "dev",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
             ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, split, files):
         """Yields examples."""
         # TODO(scifact): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "corpus":
-                    yield id_, {
-                        "doc_id": int(data["doc_id"]),
-                        "title": data["title"],
-                        "abstract": data["abstract"],
-                        "structured": data["structured"],
-                    }
-                else:
-                    if split == "test":
-                        yield id_, {
-                            "id": data["id"],
-                            "claim": data["claim"],
-                            "evidence_doc_id": "",
-                            "evidence_label": "",
-                            "evidence_sentences": [],
-                            "cited_doc_ids": [],
-                        }
-                    else:
-                        evidences = data["evidence"]
-                        if evidences:
-                            for id1, doc_id in enumerate(evidences):
-                                for id2, evidence in enumerate(evidences[doc_id]):
-                                    yield str(id_) + "_" + str(id1) + "_" + str(id2), {
-                                        "id": data["id"],
-                                        "claim": data["claim"],
-                                        "evidence_doc_id": doc_id,
-                                        "evidence_label": evidence["label"],
-                                        "evidence_sentences": evidence["sentences"],
-                                        "cited_doc_ids": data.get("cited_doc_ids", []),
-                                    }
-                        else:
-                            yield id_, {
-                                "id": data["id"],
-                                "claim": data["claim"],
-                                "evidence_doc_id": "",
-                                "evidence_label": "",
-                                "evidence_sentences": [],
-                                "cited_doc_ids": data.get("cited_doc_ids", []),
-                            }
+        for path, f in files:
+            if path == filepath:
+                for id_, row in enumerate(f):
+                    data = json.loads(row.decode("utf-8"))
+                    if self.config.name == "corpus":
+                        yield id_, {
+                            "doc_id": int(data["doc_id"]),
+                            "title": data["title"],
+                            "abstract": data["abstract"],
+                            "structured": data["structured"],
+                        }
+                    else:
+                        if split == "test":
+                            yield id_, {
+                                "id": data["id"],
+                                "claim": data["claim"],
+                                "evidence_doc_id": "",
+                                "evidence_label": "",
+                                "evidence_sentences": [],
+                                "cited_doc_ids": [],
+                            }
+                        else:
+                            evidences = data["evidence"]
+                            if evidences:
+                                for id1, doc_id in enumerate(evidences):
+                                    for id2, evidence in enumerate(evidences[doc_id]):
+                                        yield str(id_) + "_" + str(id1) + "_" + str(id2), {
+                                            "id": data["id"],
+                                            "claim": data["claim"],
+                                            "evidence_doc_id": doc_id,
+                                            "evidence_label": evidence["label"],
+                                            "evidence_sentences": evidence["sentences"],
+                                            "cited_doc_ids": data.get("cited_doc_ids", []),
+                                        }
+                            else:
+                                yield id_, {
+                                    "id": data["id"],
+                                    "claim": data["claim"],
+                                    "evidence_doc_id": "",
+                                    "evidence_label": "",
+                                    "evidence_sentences": [],
+                                    "cited_doc_ids": data.get("cited_doc_ids", []),
+                                }
+                break
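For end users nothing changes in regular mode, but the `iter_archive` pattern above is what lets the script run with `streaming=True`. A quick usage sketch; the `corpus` config name comes from the diff, while `claims` is the assumed name of the second config, which this diff does not show:

    from datasets import load_dataset

    # Regular (download-and-prepare) mode, behavior unchanged by this commit.
    corpus = load_dataset("scifact", "corpus", split="train")
    print(corpus[0]["title"])

    # Streaming mode: examples are read straight out of the downloaded
    # archive, without extracting it to disk first.
    claims = load_dataset("scifact", "claims", split="train", streaming=True)
    print(next(iter(claims)))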