Amanpreet Singh committed
Commit 7474d71 (parent: 0dbfe9b)

new commit

Files changed (3):
  1. README.md +379 -0
  2. scirepeval_test.py +197 -0
  3. scirepeval_test_configs.py +99 -0
README.md ADDED
@@ -0,0 +1,379 @@
---
dataset_info:
- config_name: fos
  features:
  - name: paper_id
    dtype: string
  - name: label
    sequence: int32
  splits:
  - name: test
    num_bytes: 51276
    num_examples: 472
  - name: train
    num_bytes: 5873604
    num_examples: 54131
  download_size: 3194762
  dataset_size: 5924880
- config_name: mesh_descriptors
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: int32
  splits:
  - name: test
    num_bytes: 820660
    num_examples: 51738
  - name: train
    num_bytes: 3283053
    num_examples: 206949
  download_size: 3203144
  dataset_size: 4103713
- config_name: cite_count
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: float64
  splits:
  - name: test
    num_bytes: 121260
    num_examples: 6012
  - name: train
    num_bytes: 483822
    num_examples: 24000
  download_size: 477603
  dataset_size: 605082
- config_name: pub_year
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: float64
  splits:
  - name: test
    num_bytes: 123284
    num_examples: 6000
  - name: train
    num_bytes: 493073
    num_examples: 24000
  download_size: 518506
  dataset_size: 616357
- config_name: high_influence_cite
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 1439013
    num_examples: 58255
  download_size: 3477938
  dataset_size: 1439013
- config_name: same_author
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 3144107
    num_examples: 123430
  download_size: 7464157
  dataset_size: 3144107
- config_name: search
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 1283980
    num_examples: 25850
  download_size: 2188731
  dataset_size: 1283980
- config_name: drsm
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: int32
  splits:
  - name: test
    num_bytes: 15277
    num_examples: 955
  - name: train
    num_bytes: 119083
    num_examples: 7520
  download_size: 100492
  dataset_size: 134360
- config_name: feeds_1
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 110997
    num_examples: 4223
  download_size: 258802
  dataset_size: 110997
- config_name: feeds_m
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 2321483
    num_examples: 87528
  download_size: 5384963
  dataset_size: 2321483
- config_name: feeds_title
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 210605
    num_examples: 4233
  download_size: 358760
  dataset_size: 210605
- config_name: peer_review_score
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: float64
  splits:
  - name: test
    num_bytes: 89892
    num_examples: 2043
  - name: train
    num_bytes: 359348
    num_examples: 8167
  download_size: 408432
  dataset_size: 449240
- config_name: hIndex
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: float64
  splits:
  - name: test
    num_bytes: 94864
    num_examples: 2156
  - name: train
    num_bytes: 382756
    num_examples: 8699
  download_size: 434232
  dataset_size: 477620
- config_name: trec_covid
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: int8
  splits:
  - name: test
    num_bytes: 3396582
    num_examples: 69318
  download_size: 5822714
  dataset_size: 3396582
- config_name: tweet_mentions
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: float64
  splits:
  - name: test
    num_bytes: 111212
    num_examples: 5132
  - name: train
    num_bytes: 444784
    num_examples: 20523
  download_size: 454231
  dataset_size: 555996
- config_name: scidocs_mag
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: int32
  splits:
  - name: test
    num_bytes: 180048
    num_examples: 3751
  - name: train
    num_bytes: 840048
    num_examples: 17501
  download_size: 923863
  dataset_size: 1020096
- config_name: scidocs_mesh
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: int32
  splits:
  - name: test
    num_bytes: 169488
    num_examples: 3531
  - name: train
    num_bytes: 790944
    num_examples: 16478
  download_size: 862299
  dataset_size: 960432
- config_name: scidocs_view
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 2668042
    num_examples: 29978
  download_size: 3717272
  dataset_size: 2668042
- config_name: scidocs_cite
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 2663592
    num_examples: 29928
  download_size: 3711072
  dataset_size: 2663592
- config_name: scidocs_cocite
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 2665461
    num_examples: 29949
  download_size: 3713676
  dataset_size: 2665461
- config_name: scidocs_read
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 2667953
    num_examples: 29977
  download_size: 3717148
  dataset_size: 2667953
- config_name: reviewers
  features:
  - name: r_id
    dtype: string
  - name: papers
    sequence: string
  splits:
  - name: metadata
    num_bytes: 3564977
    num_examples: 668
  download_size: 3576339
  dataset_size: 3564977
- config_name: paper_reviewer_matching
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test_hard
    num_bytes: 50603
    num_examples: 1729
  - name: test_soft
    num_bytes: 50603
    num_examples: 1729
  download_size: 222236
  dataset_size: 101206
- config_name: biomimicry
  features:
  - name: paper_id
    dtype: string
  - name: label
    dtype: int32
  splits:
  - name: test
    num_bytes: 44513
    num_examples: 2748
  - name: train
    num_bytes: 133570
    num_examples: 8243
  download_size: 134151
  dataset_size: 178083
- config_name: relish
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 4779565
    num_examples: 191245
  download_size: 11473140
  dataset_size: 4779565
- config_name: nfcorpus
  features:
  - name: query_id
    dtype: string
  - name: cand_id
    dtype: string
  - name: score
    dtype: uint8
  splits:
  - name: test
    num_bytes: 1188859
    num_examples: 44634
  download_size: 2751049
  dataset_size: 1188859
---
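
The YAML above enumerates every evaluation config in this dataset. For orientation, a config is loaded by name with the `datasets` library; a minimal sketch, assuming a placeholder repo id (the actual Hub id is not part of this commit):

from datasets import load_dataset

# "allenai/scirepeval_test" is a placeholder repo id; substitute the real one.
fos = load_dataset("allenai/scirepeval_test", "fos")       # classification: paper_id + multi-label sequence
qrel = load_dataset("allenai/scirepeval_test", "search")   # ranking: query_id, cand_id, score (test split only)

print(fos["train"][0])   # {'paper_id': ..., 'label': [...]}
print(qrel["test"][0])   # {'query_id': ..., 'cand_id': ..., 'score': ...}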
scirepeval_test.py ADDED
@@ -0,0 +1,197 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""


import ast
import csv
import json

import datasets

from .scirepeval_test_configs import SCIREPEVAL_CONFIGS


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2021}
}
"""

# TODO: Add a description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""


# TODO: The name of the dataset usually matches the script name with CamelCase instead of snake_case
class Scirepeval(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.1.0")

    # This dataset has multiple configurations, one per SciRepEval task.
    # Any of them can be loaded by name, e.g.
    #   data = datasets.load_dataset('my_dataset', 'fos')
    BUILDER_CONFIGS = SCIREPEVAL_CONFIGS

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # The columns and their types differ per configuration, so they are taken from the config.
            features=datasets.Features(self.config.features),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Downloads the data and defines the splits; the configuration selected by the user is self.config.
        base_url = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/scirepeval"
        data_urls = {}
        data_dir = self.config.url if self.config.url else self.config.name

        if self.config.task_type in {"classification", "regression"}:
            data_urls["train"] = f"{base_url}/test/{data_dir}/train.csv"
            data_urls["test"] = f"{base_url}/test/{data_dir}/test.csv"
        elif self.config.task_type == "metadata":
            data_urls["metadata"] = f"{base_url}/test/{data_dir}/reviewer_metadata.jsonl"
        elif "reviewer_matching" in self.config.name:
            data_urls.update({"test_hard": f"{base_url}/test/{data_dir}/test_hard_qrel.jsonl",
                              "test_soft": f"{base_url}/test/{data_dir}/test_soft_qrel.jsonl"})
        else:
            data_urls["test"] = f"{base_url}/test/{data_dir}/test_qrel.jsonl"

        downloaded_files = dl_manager.download_and_extract(data_urls)
        splits = []
        if self.config.task_type == "metadata":
            splits = [
                datasets.SplitGenerator(
                    name=datasets.Split("metadata"),
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": downloaded_files["metadata"],
                        "split": "metadata",
                    },
                ),
            ]
        elif "reviewer_matching" in self.config.name:
            splits = [
                datasets.SplitGenerator(
                    name=datasets.Split("test_hard"),
                    gen_kwargs={
                        "filepath": downloaded_files["test_hard"],
                        "split": "test",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split("test_soft"),
                    gen_kwargs={
                        "filepath": downloaded_files["test_soft"],
                        "split": "test",
                    },
                ),
            ]
        else:
            splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": downloaded_files["test"],
                        "split": "test",
                    },
                ),
            ]

        if "train" in downloaded_files:
            splits += [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": downloaded_files["train"],
                        "split": "train",
                    },
                ),
            ]
        return splits

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # Yields (key, example) tuples; the key must be unique for each example.
        if self.config.task_type in {"classification", "regression"}:
            with open(filepath, encoding="utf-8") as f:
                reader = csv.reader(f)
                for id_, row in enumerate(reader):
                    if id_ == 0:  # skip the CSV header row
                        continue
                    yield id_, {
                        "paper_id": row[0],
                        # "fos" is multi-label, so its label column is parsed into a list of ints
                        "label": ast.literal_eval(",".join(row[1:])) if self.config.name == "fos" else row[1],
                    }
        elif self.config.task_type == "metadata":
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    d = json.loads(line)
                    yield d["r_id"], d
        else:
            with open(filepath, encoding="utf-8") as f:
                for i, line in enumerate(f):
                    d = json.loads(line)
                    yield i, d
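
The ranking-style configs ("proximity" and "search" tasks) only yield flat rows of query_id, cand_id, and score, so downstream evaluation typically regroups them per query. A minimal sketch of that regrouping over the examples produced by `_generate_examples` above (the helper name and repo id are illustrative, not part of this commit):

from collections import defaultdict

def to_qrels(examples):
    # Group flat qrel rows into {query_id: {cand_id: score}} for ranking metrics.
    qrels = defaultdict(dict)
    for ex in examples:
        qrels[ex["query_id"]][ex["cand_id"]] = int(ex["score"])
    return dict(qrels)

# e.g. qrels = to_qrels(load_dataset("allenai/scirepeval_test", "trec_covid")["test"])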
scirepeval_test_configs.py ADDED
@@ -0,0 +1,99 @@
from typing import Any, Dict

import datasets


class ScirepevalConfig(datasets.BuilderConfig):
    """BuilderConfig for a SciRepEval task."""

    def __init__(self, task_type: str, features: Dict[str, Any] = None, url: str = "", **kwargs):
        """BuilderConfig for a SciRepEval task.

        Args:
            task_type: *string*, one of "classification", "regression", "proximity",
                "search", or "metadata"; controls which files are downloaded and how
                examples are generated.
            features: *dict*, mapping from feature name to the `datasets.Value` /
                `datasets.Sequence` type used for this task.
            url: *string*, optional sub-path under the SciRepEval data bucket when it
                differs from the config name.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.1.0"), **kwargs)
        self.features = features
        self.task_type = task_type
        self.url = url


SCIREPEVAL_CONFIGS = [
    ScirepevalConfig(name="fos", task_type="classification",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Sequence(datasets.Value("int32"))}),

    ScirepevalConfig(name="mesh_descriptors", task_type="classification",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("int32")}),

    ScirepevalConfig(name="biomimicry", task_type="classification",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("int32")}),

    ScirepevalConfig(name="cite_count", task_type="regression",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("float64")}),

    ScirepevalConfig(name="pub_year", task_type="regression",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("float64")}),

    ScirepevalConfig(name="high_influence_cite", task_type="proximity",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="same_author", task_type="proximity",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="search", task_type="search",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="drsm", task_type="classification",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("int32")}),

    ScirepevalConfig(name="relish", task_type="proximity",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="nfcorpus", task_type="search",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="peer_review_score", task_type="regression",
                     url="peer_review_score_hIndex/peer_review_score",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("float64")}),

    ScirepevalConfig(name="hIndex", task_type="regression",
                     url="peer_review_score_hIndex/hIndex",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("float64")}),

    ScirepevalConfig(name="trec_covid", task_type="search",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("int8")}),

    ScirepevalConfig(name="tweet_mentions", task_type="regression",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("float64")}),

    ScirepevalConfig(name="scidocs_mag", task_type="classification",
                     url="scidocs/mag_mesh/mag",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("int32")}),

    ScirepevalConfig(name="scidocs_mesh", task_type="classification",
                     url="scidocs/mag_mesh/mesh",
                     features={"paper_id": datasets.Value("string"),
                               "label": datasets.Value("int32")}),

    ScirepevalConfig(name="scidocs_view", task_type="proximity",
                     url="scidocs/view_cite_read/coview",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="scidocs_cite", task_type="proximity",
                     url="scidocs/view_cite_read/cite",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="scidocs_cocite", task_type="proximity",
                     url="scidocs/view_cite_read/cocite",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="scidocs_read", task_type="proximity",
                     url="scidocs/view_cite_read/coread",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),

    ScirepevalConfig(name="reviewers", task_type="metadata",
                     url="paper_reviewer_matching",
                     features={"r_id": datasets.Value("string"),
                               "papers": datasets.Sequence(datasets.Value("string"))}),

    ScirepevalConfig(name="paper_reviewer_matching", task_type="proximity",
                     features={"query_id": datasets.Value("string"),
                               "cand_id": datasets.Value("string"),
                               "score": datasets.Value("uint8")}),
]
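
Additional tasks would follow the same pattern: a `ScirepevalConfig` needs a `name`, a `features` dict, a `task_type` that `_split_generators` understands ("classification", "regression", "metadata", or a qrel-style type such as "proximity" or "search"), and an optional `url` when the S3 folder differs from the config name. A hypothetical example, not part of this commit:

SCIREPEVAL_CONFIGS.append(
    ScirepevalConfig(
        name="my_new_task",                # hypothetical config name
        task_type="classification",        # routed to train.csv / test.csv by _split_generators
        url="my_new_task_folder",          # optional S3 sub-path if it differs from the name
        features={"paper_id": datasets.Value("string"),
                  "label": datasets.Value("int32")},
    )
)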