system (HF staff) committed
Commit 8f5e82b
1 Parent(s): 3d7277e

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2)
  1. README.md +1 -0
  2. scicite.py +37 -35
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: SciCite
 languages:
 - en
 paperswithcode_id: scicite
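
The added pretty_name field sits in the dataset card's YAML front matter, between the opening and closing --- delimiters. As a rough, illustrative sketch (not part of this commit; the local file path and the use of PyYAML are assumptions), the header can be read back like this:

# Illustrative sketch only: read the README.md front matter and print the new field.
# Assumes the card begins with a "---" ... "---" YAML block, as shown in the diff above.
import yaml  # PyYAML

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The metadata sits between the first two "---" delimiters.
_, front_matter, _body = text.split("---", 2)
metadata = yaml.safe_load(front_matter)

print(metadata["pretty_name"])        # SciCite
print(metadata["languages"])          # ['en']
print(metadata["paperswithcode_id"])  # scicite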
scicite.py CHANGED
@@ -18,7 +18,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -95,53 +94,56 @@ class Scicite(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        dl_paths = dl_manager.download_and_extract(
-            {
-                "scicite": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scicite/scicite.tar.gz",
-            }
-        )
-        path = os.path.join(dl_paths["scicite"], "scicite")
+        archive = dl_manager.download("https://s3-us-west-2.amazonaws.com/ai2-s2-research/scicite/scicite.tar.gz")
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"path": os.path.join(path, "train.jsonl")},
+                gen_kwargs={
+                    "filepath": "/".join(["scicite", "train.jsonl"]),
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"path": os.path.join(path, "dev.jsonl")},
+                gen_kwargs={"filepath": "/".join(["scicite", "dev.jsonl"]), "files": dl_manager.iter_archive(archive)},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"path": os.path.join(path, "test.jsonl")},
+                gen_kwargs={
+                    "filepath": "/".join(["scicite", "test.jsonl"]),
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
-    def _generate_examples(self, path=None):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
-        with open(path, encoding="utf-8") as f:
-            unique_ids = {}
-            for line in f:
-                d = json.loads(line)
-                unique_id = str(d["unique_id"])
-                if unique_id in unique_ids:
-                    continue
-                unique_ids[unique_id] = True
-                yield unique_id, {
-                    "string": d["string"],
-                    "label": str(d["label"]),
-                    "sectionName": str(d["sectionName"]),
-                    "citingPaperId": str(d["citingPaperId"]),
-                    "citedPaperId": str(d["citedPaperId"]),
-                    "excerpt_index": int(d["excerpt_index"]),
-                    "isKeyCitation": bool(d["isKeyCitation"]),
-                    "label2": str(d.get("label2", "none")),
-                    "citeEnd": _safe_int(d["citeEnd"]),
-                    "citeStart": _safe_int(d["citeStart"]),
-                    "source": str(d["source"]),
-                    "label_confidence": float(d.get("label_confidence", 0.0)),
-                    "label2_confidence": float(d.get("label2_confidence", 0.0)),
-                    "id": str(d["id"]),
-                }
+        for path, f in files:
+            if path == filepath:
+                unique_ids = {}
+                for line in f:
+                    d = json.loads(line.decode("utf-8"))
+                    unique_id = str(d["unique_id"])
+                    if unique_id in unique_ids:
+                        continue
+                    unique_ids[unique_id] = True
+                    yield unique_id, {
+                        "string": d["string"],
+                        "label": str(d["label"]),
+                        "sectionName": str(d["sectionName"]),
+                        "citingPaperId": str(d["citingPaperId"]),
+                        "citedPaperId": str(d["citedPaperId"]),
+                        "excerpt_index": int(d["excerpt_index"]),
+                        "isKeyCitation": bool(d["isKeyCitation"]),
+                        "label2": str(d.get("label2", "none")),
+                        "citeEnd": _safe_int(d["citeEnd"]),
+                        "citeStart": _safe_int(d["citeStart"]),
+                        "source": str(d["source"]),
+                        "label_confidence": float(d.get("label_confidence", 0.0)),
+                        "label2_confidence": float(d.get("label2_confidence", 0.0)),
+                        "id": str(d["id"]),
+                    }
+                break
 
 
 def _safe_int(a):
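
The rewritten loader no longer extracts the archive to a local directory: dl_manager.iter_archive(archive) yields (path_inside_archive, file_object) pairs whose lines are raw bytes, which is why _generate_examples now matches on the in-archive path and decodes each line before json.loads. One practical consequence of this pattern in datasets 1.16.0 is that the dataset can be loaded in streaming mode. A minimal usage sketch, assuming datasets >= 1.16.0 is installed (output values are illustrative, not verified):

# Stream SciCite without downloading and extracting the full scicite.tar.gz up front.
from itertools import islice

from datasets import load_dataset

stream = load_dataset("scicite", split="train", streaming=True)

# Each example is a plain dict with the fields produced by _generate_examples above.
for example in islice(stream, 3):
    print(example["id"], example["sectionName"], example["label"])

Loading without streaming=True continues to work as before; the commit changes how the archive is read, not the examples it yields.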