howey committed
Commit
489cd12
2 Parent(s): 33b0cae d8ac4f0

Merge branch 'main' of https://huggingface.co/datasets/howey/super_scirep

Files changed (3)
  1. .gitattributes +104 -0
  2. super_scirep.py +193 -0
  3. super_scirep_config.py +218 -0
.gitattributes ADDED
@@ -0,0 +1,104 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ feeds_m/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ fos/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ paper_reviewer_matching/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ search/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ tweet_mentions/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ feeds_1/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ feeds_title/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ high_influence_cite/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ peer_review_score_hIndex/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ scidocs_view_cite_read/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ search/validation/state.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/validation/state.json filter=lfs diff=lfs merge=lfs -text
+ feeds_title/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ fos/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/validation/state.json filter=lfs diff=lfs merge=lfs -text
+ feeds_m/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ fos/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ fos/validation/state.json filter=lfs diff=lfs merge=lfs -text
+ search/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ feeds_m/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ search/train/state.json filter=lfs diff=lfs merge=lfs -text
+ tweet_mentions/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ feeds_1/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ fos/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ fos/train/state.json filter=lfs diff=lfs merge=lfs -text
+ paper_reviewer_matching/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ tweet_mentions/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ peer_review_score_hIndex/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ search/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ search/train/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ peer_review_score_hIndex/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/train/state.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/train/state.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/validation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ feeds_1/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ feeds_title/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ fos/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ paper_reviewer_matching/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ scidocs_view_cite_read/evaluation/state.json filter=lfs diff=lfs merge=lfs -text
+ scidocs_view_cite_read/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
+ search/evaluation/dataset_info.json filter=lfs diff=lfs merge=lfs -text
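
The rules above route every matching file through Git LFS. As a quick sanity check, a small sketch (not part of the commit, and assuming it runs inside a checkout of this repository with git available) can ask git itself which `filter` attribute a path resolves to:

import subprocess

# Resolve the effective "filter" attribute for one of the LFS-tracked paths above.
result = subprocess.run(
    ["git", "check-attr", "filter", "--", "fos/dataset_dict.json"],
    capture_output=True, text=True, check=True)
print(result.stdout.strip())  # expected: fos/dataset_dict.json: filter: lfs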
super_scirep.py ADDED
@@ -0,0 +1,193 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+ import csv
+ import json
+
+ import datasets
+ from datasets.data_files import DataFilesDict
+ from .super_scirep_config import SUPERSCIREPEVAL_CONFIGS
+
+ # from datasets.packaged_modules.json import json
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.},
+ year={2021}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class SuperSciRep(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = SUPERSCIREPEVAL_CONFIGS
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=self.config.description,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(self.config.features),
+             # Here we define them above because they are different between the configurations.
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license=self.config.license,
+             # Citation for the dataset
+             citation=self.config.citation,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+         # base_url = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/scirepeval"
+         base_url = "https://hhy-tue.s3.eu-central-1.amazonaws.com/data/super_scirep"
+         data_urls = dict()
+         # data_dir = self.config.url if self.config.url else self.config.name
+         data_dir = self.config.name
+         if self.config.is_training:
+             data_urls = {"train": f"{base_url}/{data_dir}/train.jsonl",
+                          "val": f"{base_url}/{data_dir}/validation.jsonl"}
+
+         if "cite_prediction" not in self.config.name:
+             data_urls.update({"test": f"{base_url}/{data_dir}/evaluation.jsonl"})
+         downloaded_files = dl_manager.download_and_extract(data_urls)
+         splits = []
+         if "test" in downloaded_files:
+             splits = [
+                 datasets.SplitGenerator(
+                     name=datasets.Split("evaluation"),
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": downloaded_files["test"],
+                         "split": "evaluation"
+                     },
+                 ),
+             ]
+
+         if "train" in downloaded_files:
+             splits += [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": downloaded_files["train"],
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": downloaded_files["val"],
+                         "split": "validation",
+                     }),
+             ]
+         return splits
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         def read_data(data_path):
+             try:
+                 with open(data_path, "r", encoding="utf-8") as f:
+                     task_data = json.load(f)
+             except json.JSONDecodeError:
+                 with open(data_path, encoding="utf-8") as f:
+                     task_data = [json.loads(line) for line in f]
+             if isinstance(task_data, dict):
+                 task_data = list(task_data.values())
+             return task_data
+
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         # data = read_data(filepath)
+         seen_keys = set()
+         IGNORE = {"n_key_citations", "session_id", "user_id", "user"}
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 d = json.loads(line)
+                 d = {k: v for k, v in d.items() if k not in IGNORE}
+                 key = "doc_id" if self.config.name != "cite_prediction_new" else "corpus_id"
+                 if self.config.task_type == "proximity":
+                     if "cite_prediction" in self.config.name:
+                         if "arxiv_id" in d["query"]:
+                             for item in ["query", "pos", "neg"]:
+                                 del d[item]["arxiv_id"]
+                                 del d[item]["doi"]
+                         if "fos" in d["query"]:
+                             del d["query"]["fos"]
+                         if "score" in d["pos"]:
+                             del d["pos"]["score"]
+                         yield str(d["query"][key]) + str(d["pos"][key]) + str(d["neg"][key]), d
+                     else:
+                         if d["query"][key] not in seen_keys:
+                             seen_keys.add(d["query"][key])
+                             yield str(d["query"][key]), d
+                 else:
+                     if d[key] not in seen_keys:
+                         seen_keys.add(d[key])
+                         if self.config.task_type != "search":
+                             if "corpus_id" not in d:
+                                 d["corpus_id"] = None
+                             if "scidocs" in self.config.name:
+                                 if "cited_by" not in d:
+                                     d["cited_by"] = []
+                                 if isinstance(d["corpus_id"], str):
+                                     d["corpus_id"] = None
+                         yield d[key], d
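
For reference, a minimal sketch of loading one of the configurations this builder defines, assuming the repository id howey/super_scirep from the merge URL above; recent releases of the `datasets` library also require trust_remote_code=True for script-backed datasets like this one:

from datasets import load_dataset

# Any name from SUPERSCIREPEVAL_CONFIGS works here, e.g. "fos" or "cite_count".
# Configs declared with is_training=True expose "train" and "validation" splits;
# most configs also expose the custom "evaluation" split built in _split_generators.
fos = load_dataset("howey/super_scirep", "fos", trust_remote_code=True)
print(fos["evaluation"][0]["title"])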
super_scirep_config.py ADDED
@@ -0,0 +1,218 @@
+ from typing import Dict, Any, List
+
+ import datasets
+
+
+ class SuperSciRepConfig(datasets.BuilderConfig):
+     """BuilderConfig for SuperSciRepEval."""
+
+     def __init__(self, features: Dict[str, Any], task_type: str, citation: str = "",
+                  licenses: str = "", is_training: bool = False, homepage: str = "", url="", **kwargs):
+         """BuilderConfig for SuperSciRepEval.
+
+         Args:
+             features: *dict[string, Any]*, mapping of the features that will appear in the
+                 feature dict to their types.
+             task_type: *string*, the task category, e.g. "classification (multi-label)",
+                 "regression", "proximity", "search" or "metadata".
+             citation: *string*, citation for the data set.
+             licenses: *string*, license for the data set.
+             is_training: *bool*, whether train and validation splits are available.
+             homepage: *string*, url for information about the data set.
+             url: *string*, optional sub-path of the data files.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(version=datasets.Version("1.1.0"), **kwargs)
+         self.features = features
+         self.task_type = task_type
+         self.citation = citation
+         self.license = licenses
+         self.is_training = is_training
+         self.homepage = homepage
+         self.url = url
+
+     @classmethod
+     def get_features(cls, feature_names: List[str], type_mapping: Dict[str, Any] = None) -> Dict[str, Any]:
+         full_text_mapping = {"full_text": [{"title": datasets.Value("string"),
+                                             "sentences": datasets.features.Sequence(datasets.Value("string"))}]}
+         type_mapping = {**full_text_mapping, **(type_mapping or {})}
+         features = {name: type_mapping[name] if name in type_mapping else datasets.Value("string")
+                     for name in feature_names}
+         if "corpus_id" in features:
+             features["corpus_id"] = datasets.Value("uint64")
+         return features
+
+
+ SUPERSCIREPEVAL_CONFIGS = [
+     SuperSciRepConfig(name="fos", features=SuperSciRepConfig.get_features(
+         ["doc_id", "corpus_id", "title", "abstract", "full_text", "labels", "labels_text"],
+         {"labels": datasets.Sequence(datasets.Value("int32")),
+          "labels_text": datasets.Sequence(datasets.Value("string"))}),
+         task_type="classification (multi-label)", is_training=True, description=""),
+
+     SuperSciRepConfig(name="cite_count", features=SuperSciRepConfig.get_features(
+         ["doc_id", "corpus_id", "title", "abstract", "full_text", "venue", "n_citations", "log_citations"],
+         {"n_citations": datasets.Value("int32"),
+          "log_citations": datasets.Value("float32")}),
+         task_type="regression", is_training=True, description=""),
+
+     SuperSciRepConfig(name="pub_year", features=SuperSciRepConfig.get_features(
+         ["doc_id", "corpus_id", "title", "abstract", "full_text", "year", "venue", "norm_year", "scaled_year", "n_authors", "norm_authors"],
+         {"year": datasets.Value("int32"), "norm_year": datasets.Value("float32"),
+          "scaled_year": datasets.Value("float32"), "n_authors": datasets.Value("int32"),
+          "norm_authors": datasets.Value("float32")}),
+         task_type="regression", is_training=True, description=""),
+
+     SuperSciRepConfig(name="high_influence_cite",
+                       features=SuperSciRepConfig.get_features(
+                           ["query", "candidates"],
+                           {"query": {"doc_id": datasets.Value("string"),
+                                      "title": datasets.Value("string"),
+                                      "abstract": datasets.Value("string"),
+                                      "full_text": [{"title": datasets.Value("string"),
+                                                     "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                      "corpus_id": datasets.Value("uint64")},
+                            "candidates": [{"doc_id": datasets.Value("string"),
+                                            "title": datasets.Value("string"),
+                                            "abstract": datasets.Value("string"),
+                                            "full_text": [{"title": datasets.Value("string"),
+                                                           "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                            "corpus_id": datasets.Value("uint64"),
+                                            "score": datasets.Value("uint32")}]}),
+                       task_type="proximity", is_training=True, description=""),
+
+     SuperSciRepConfig(name="search",
+                       features=SuperSciRepConfig.get_features(
+                           ["query", "doc_id", "candidates"],
+                           {"candidates": [{"doc_id": datasets.Value("string"),
+                                            "title": datasets.Value("string"),
+                                            "abstract": datasets.Value("string"),
+                                            "full_text": [{"title": datasets.Value("string"),
+                                                           "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                            "corpus_id": datasets.Value("uint64"),
+                                            "venue": datasets.Value("string"),
+                                            "year": datasets.Value("float64"),
+                                            "author_names": datasets.Sequence(datasets.Value("string")),
+                                            "n_citations": datasets.Value("int32"),
+                                            "n_key_citations": datasets.Value("int32"),
+                                            "score": datasets.Value("uint32")}]}),
+                       task_type="search", is_training=True, description=""),
+
+     SuperSciRepConfig(name="feeds_1",
+                       features=SuperSciRepConfig.get_features(
+                           ["query", "feed_id", "candidates"],
+                           {"query": {"doc_id": datasets.Value("string"),
+                                      "title": datasets.Value("string"),
+                                      "abstract": datasets.Value("string"),
+                                      "full_text": [{"title": datasets.Value("string"),
+                                                     "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                      "corpus_id": datasets.Value("uint64")},
+                            "candidates": [{"doc_id": datasets.Value("string"),
+                                            "title": datasets.Value("string"),
+                                            "abstract": datasets.Value("string"),
+                                            "full_text": [{"title": datasets.Value("string"),
+                                                           "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                            "corpus_id": datasets.Value("uint64"),
+                                            "score": datasets.Value("uint32")}]}),
+                       task_type="proximity", description="", url="feeds/feeds_1"),
+
+     SuperSciRepConfig(name="feeds_m",
+                       features=SuperSciRepConfig.get_features(
+                           ["query", "feed_id", "candidates"],
+                           {"query": {"doc_id": datasets.Value("string"),
+                                      "title": datasets.Value("string"),
+                                      "abstract": datasets.Value("string"),
+                                      "full_text": [{"title": datasets.Value("string"),
+                                                     "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                      "corpus_id": datasets.Value("uint64")},
+                            "candidates": [{"doc_id": datasets.Value("string"),
+                                            "title": datasets.Value("string"),
+                                            "abstract": datasets.Value("string"),
+                                            "full_text": [{"title": datasets.Value("string"),
+                                                           "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                            "corpus_id": datasets.Value("uint64"),
+                                            "score": datasets.Value("uint32")}]}),
+                       task_type="proximity", description="", url="feeds/feeds_m"),
+
+     SuperSciRepConfig(name="feeds_title",
+                       features=SuperSciRepConfig.get_features(
+                           ["query", "doc_id", "feed_id", "abbreviations", "candidates"],
+                           {"candidates": [{"doc_id": datasets.Value("string"),
+                                            "title": datasets.Value("string"),
+                                            "abstract": datasets.Value("string"),
+                                            "full_text": [{"title": datasets.Value("string"),
+                                                           "sentences": datasets.features.Sequence(datasets.Value("string"))}],
+                                            "corpus_id": datasets.Value("uint64"),
+                                            "score": datasets.Value("uint32")}]}),
+                       task_type="search", description="", url="feeds/feeds_title"),
+
+     SuperSciRepConfig(name="peer_review_score_hIndex", features=SuperSciRepConfig.get_features(
+         ["doc_id", "corpus_id", "title", "abstract", "full_text", "rating", "confidence", "authors", "decision", "mean_rating", "hIndex"],
+         {"mean_rating": datasets.Value("float32"),
+          "rating": datasets.Sequence(datasets.Value("int32")),
+          "authors": datasets.Sequence(datasets.Value("string")),
+          "hIndex": datasets.Sequence(datasets.Value("string"))}),
+         task_type="regression", description=""),
+
+     SuperSciRepConfig(name="tweet_mentions", features=SuperSciRepConfig.get_features(
+         ["doc_id", "corpus_id", "title", "abstract", "full_text", "index", "retweets", "count", "mentions"],
+         {"index": datasets.Value("int32"), "count": datasets.Value("int32"),
+          "retweets": datasets.Value("float32"), "mentions": datasets.Value("float32")}),
+         task_type="regression", description="",
+         citation="""@article{Jain2021TweetPapAD,
+             title={TweetPap: A Dataset to Study the Social Media Discourse of Scientific Papers},
+             author={Naman Jain and Mayank Kumar Singh},
+             journal={2021 ACM/IEEE Joint Conference on Digital Libraries (JCDL)},
+             year={2021},
+             pages={328-329}
+         }"""),
+
+     SuperSciRepConfig(name="scidocs_view_cite_read", features=SuperSciRepConfig.get_features(
+         ["doc_id", "corpus_id", "title", "abstract", "full_text", "authors", "cited_by", "references", "year"],
+         {"year": datasets.Value("int32"),
+          "authors": datasets.Sequence(datasets.Value("string")),
+          "cited_by": datasets.Sequence(datasets.Value("string")),
+          "references": datasets.Sequence(datasets.Value("string"))}),
+         task_type="metadata", description="", url="scidocs/view_cite_read",
+         homepage="https://github.com/allenai/scidocs",
+         citation="""@inproceedings{specter2020cohan,
+             title={SPECTER: Document-level Representation Learning using Citation-informed Transformers},
+             author={Arman Cohan and Sergey Feldman and Iz Beltagy and Doug Downey and Daniel S. Weld},
+             booktitle={ACL},
+             year={2020}
+         }"""),
+
+     SuperSciRepConfig(name="paper_reviewer_matching", features=SuperSciRepConfig.get_features(
+         ["doc_id", "title", "abstract", "full_text", "corpus_id"],
+         {}),
+         task_type="metadata", description="",
+         citation="""@inproceedings{Mimno2007ExpertiseMF,
+             title={Expertise modeling for matching papers with reviewers},
+             author={David Mimno and Andrew McCallum},
+             booktitle={KDD '07},
+             year={2007}
+         }, @ARTICLE{9714338,
+             author={Zhao, Yue and Anand, Ajay and Sharma, Gaurav},
+             journal={IEEE Access},
+             title={Reviewer Recommendations Using Document Vector Embeddings and a Publisher Database: Implementation and Evaluation},
+             year={2022},
+             volume={10},
+             number={},
+             pages={21798-21811},
+             doi={10.1109/ACCESS.2022.3151640}}"""),
+ ]
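
To illustrate what `get_features` produces, a small sketch (not part of the commit, assuming the file is importable as `super_scirep_config`): names without an explicit type mapping fall back to string, `full_text` gets the nested title/sentences structure when requested, and `corpus_id` is always coerced to uint64.

import datasets
from super_scirep_config import SuperSciRepConfig

feats = SuperSciRepConfig.get_features(
    ["doc_id", "corpus_id", "abstract", "n_citations"],
    {"n_citations": datasets.Value("int32")})
# doc_id and abstract fall back to string, n_citations keeps its mapped int32 type,
# and corpus_id is overridden to uint64 regardless of any mapping.
print(feats["corpus_id"])  # Value(dtype='uint64', ...)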