MHoubre committed on
Commit cdf498a
1 Parent(s): 3643c24

upload data

Files changed (7)
  1. README.md +20 -1
  2. kpbiomed.py +215 -0
  3. test.jsonl +3 -0
  4. train_large.jsonl +3 -0
  5. train_medium.jsonl +3 -0
  6. train_small.jsonl +3 -0
  7. val.jsonl +3 -0
README.md CHANGED
@@ -1,3 +1,22 @@
  ---
- license: cc-by-nc-4.0
+ annotations_creators:
+ - unknown
+ language_creators:
+ - unknown
+ language:
+ - en
+ license:
+ - cc-by-nc-4.0
+ multilinguality:
+ - monolingual
+ task_categories:
+ - text-mining
+ - text-generation
+ task_ids:
+ - keyphrase-generation
+ - keyphrase-extraction
+ size_categories:
+ - 100K<n<1M
+ pretty_name:
+ - KP-Biomed
  ---
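The YAML block added above is the dataset card front matter; the Hub parses it into structured metadata (license, tasks, size, language). A minimal sketch of reading it back programmatically, assuming huggingface_hub is installed; the repository id "MHoubre/kpbiomed" is an assumption, not stated in this commit:

from huggingface_hub import DatasetCard

# "MHoubre/kpbiomed" is an assumed repository id; substitute the actual Hub path.
card = DatasetCard.load("MHoubre/kpbiomed")
print(card.data.license)          # the license declared in the front matter
print(card.data.task_categories)  # the task categories declared above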
kpbiomed.py ADDED
@@ -0,0 +1,215 @@
+ """KPBiomed benchmark dataset for keyphrase extraction and generation."""
+
+ import json
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arXiv or on the dataset repo/website
+ _CITATION = """\
+
+ """
+
+ _DESCRIPTION = """\
+ KPBiomed benchmark dataset for keyphrase extraction and generation.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # License as declared in the dataset card above
+ _LICENSE = "CC BY-NC 4.0"
+
+ # The Hugging Face Datasets library doesn't host the data itself; these paths
+ # point to the JSON Lines files shipped with this repository.
+ _URLS = {
+     "train_large": "train_large.jsonl",
+     "train_medium": "train_medium.jsonl",
+     "train_small": "train_small.jsonl",
+     "val": "val.jsonl",
+     "test": "test.jsonl",
+ }
+
+
+ class KPBiomed(datasets.GeneratorBasedBuilder):
+     """KPBiomed benchmark dataset for keyphrase extraction and generation."""
+
+     VERSION = datasets.Version("0.0.1")
+
+     # Three configurations, one per training-set size; load one with e.g.
+     # data = datasets.load_dataset(<repo_id>, "medium")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="large", version=VERSION, description="Configuration using the large training set."),
+         datasets.BuilderConfig(name="medium", version=VERSION, description="Configuration using the medium training set."),
+         datasets.BuilderConfig(name="small", version=VERSION, description="Configuration using the small training set."),
+     ]
+
+     DEFAULT_CONFIG_NAME = "small"
+
+     def _info(self):
+         # All configurations share the same schema.
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "title": datasets.Value("string"),
+                 "abstract": datasets.Value("string"),
+                 "authors": datasets.Value("string"),
+                 "mesh_terms": datasets.features.Sequence(datasets.Value("string")),
+                 "year": datasets.Value("string"),
+                 "keyphrases": datasets.features.Sequence(datasets.Value("string")),
+                 "prmu": datasets.features.Sequence(datasets.Value("string")),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # dl_manager resolves the entries in _URLS and returns the same
+         # structure with local file paths.
+         data_dir = dl_manager.download_and_extract(_URLS)
+         # The configuration name ("large", "medium" or "small") selects the
+         # training file; validation and test are shared by all configurations.
+         train_file = data_dir[f"train_{self.config.name}"]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples
+                 gen_kwargs={"filepath": train_file, "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_dir["test"], "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_dir["val"], "split": "val"},
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         # Yields (key, example) tuples; the key just has to be unique per example.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 yield key, {
+                     "id": data["id"],
+                     "title": data["title"],
+                     "abstract": data["abstract"],
+                     "authors": data["authors"],
+                     "mesh_terms": data["mesh_terms"],
+                     "year": data["year"],
+                     "keyphrases": data["keyphrases"],
+                     "prmu": data["prmu"],
+                 }
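With this loading script in place, a configuration is selected by name at load time. A minimal usage sketch; the repository id "MHoubre/kpbiomed" is an assumption, substitute the actual Hub path:

from datasets import load_dataset

# The configuration name picks the training-set size ("large", "medium", "small");
# the validation and test splits are identical across configurations.
dataset = load_dataset("MHoubre/kpbiomed", "small")  # assumed repository id

sample = dataset["train"][0]
print(sample["title"])
print(sample["keyphrases"])  # reference keyphrases as a list of strings
print(sample["prmu"])        # one category label per keyphrase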
test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfccd4be512c3c0ea6816975a1831955fa8430f6f6e8e91556b130aec825c9da
+ size 43524648
train_large.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caed51ed5152871e4513046ed1238eacd6bb6df437e2fca02240a0e1429f1e50
+ size 12333280683
train_medium.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1bc296b121c4fbb44d4bf2b0f18bb114f12777da1e0c22bd3ce77624178c7bf
+ size 4416210275
train_small.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebedc42e492c121bfb8aef3ade5237a5e48b220d7e55957ec57c0fd18d0fd9d0
+ size 1086716933
val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26e9b435f567a7c24d57fe6039c1810ad76feab75fbf10a1252c85ea801b15d2
+ size 43603200
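The five .jsonl entries above are Git LFS pointer files rather than the data itself: each records the LFS spec version, the SHA-256 digest (oid) of the actual payload, and its size in bytes (train_large.jsonl alone is roughly 12.3 GB). A minimal sketch, not part of the commit, for checking a downloaded file against its pointer:

import hashlib
import os

def parse_lfs_pointer(path):
    # Pointer files are short "key value" lines: version, oid, size.
    with open(path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f if line.strip())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify(data_path, pointer_path):
    expected_oid, expected_size = parse_lfs_pointer(pointer_path)
    if os.path.getsize(data_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(data_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid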