Albert Villanova del Moral committed
Commit 1ae4673
1 Parent(s): 40047cd

Add dataset loading script

Files changed (1)
  1. open_access.py +210 -0
open_access.py ADDED
@@ -0,0 +1,210 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PMC Open Access Subset."""

import datetime

import pandas as pd

import datasets
from datasets.tasks import LanguageModeling


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.

Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However, articles
in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more
liberal redistribution and reuse than a traditional copyrighted work.

The PMC Open Access Subset is one part of the PMC Article Datasets.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

_URL = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/{subset}/txt/"
_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2021-12-17"
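# Illustration (derived from the constants above): with subset "commercial",
# the script downloads files such as
#   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.filelist.csv
#   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.tar.gz
# plus daily incrementals like oa_comm_txt.incr.2021-12-18.filelist.csv / .tar.gz.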


class OpenAccessConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.

        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(
            name="+".join(subsets), **kwargs,
        )
        self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())


class OpenAccess(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessConfig
    BUILDER_CONFIGS = [OpenAccessConfig(subsets="all")] + [OpenAccessConfig(subsets=subset) for subset in _SUBSETS]
    DEFAULT_CONFIG_NAME = "all"
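    # Given _SUBSETS above, the available config names are "all" (the
    # default), "commercial", "non_commercial", and "other".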

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "accession_id": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[LanguageModeling(text_column="text")],
        )

    def _split_generators(self, dl_manager):
        baseline_file_lists = []
        baseline_archives = []
        incremental_file_lists = []
        incremental_archives = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_txt."
            # Baselines: nine bulk packages, one per PMC ID range (PMC000xxxxxx-PMC008xxxxxx)
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(9)]
            for baseline in baselines:
                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
                try:
                    baseline_file_list = dl_manager.download(baseline_file_list_url)
                except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
                    continue
                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
                try:
                    baseline_archive = dl_manager.download(baseline_archive_url)
                except FileNotFoundError:
                    continue
                baseline_file_lists.append(baseline_file_list)
                baseline_archives.append(baseline_archive)
            # Incrementals: one file list and one archive per day since the baseline date
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            incremental_urls = {
                "incremental_file_lists": [
                    f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
                ],
                "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
            }
            incremental_paths = dl_manager.download(incremental_urls)
            incremental_file_lists.extend(incremental_paths["incremental_file_lists"])
            incremental_archives.extend(incremental_paths["incremental_archives"])
        # A single train split covering every requested subset
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "incremental_file_lists": incremental_file_lists,
                    "incremental_archives": [
                        dl_manager.iter_archive(archive) for archive in incremental_archives
                    ],
                },
            ),
        ]

    def _generate_examples(self, baseline_file_lists, baseline_archives, incremental_file_lists, incremental_archives):
        key = 0
        # Baselines
        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            try:
                # The file list maps each article path in the archive to its metadata
                baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
                for path, file in baseline_archive:
                    data = baselines.pop(path)
                    content = file.read()
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:  # some articles are not UTF-8 encoded
                        text = content.decode("latin-1").strip()
                    data = {
                        "text": text,
                        "pmid": data["PMID"],
                        "accession_id": data["AccessionID"],
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                    }
                    yield key, data
                    key += 1
            except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
                continue
        # Incrementals
        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
                incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    content = file.read()
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:  # some articles are not UTF-8 encoded
                        text = content.decode("latin-1").strip()
                    data = {
                        "text": text,
                        "pmid": data["PMID"],
                        "accession_id": data["AccessionID"],
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                    }
                    yield key, data
                    key += 1
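
For reference, a minimal usage sketch for the script added above. The repository id "pmc/open_access" is an assumption based on the repo and file names, and streaming is used so the bulk archives are iterated over rather than fully downloaded first:

from datasets import load_dataset

# "commercial" is one of the configs defined by OpenAccessConfig above;
# "all" (the default), "non_commercial", and "other" are the alternatives.
# NOTE: the repository id "pmc/open_access" is an assumption.
dataset = load_dataset("pmc/open_access", "commercial", split="train", streaming=True)

# Inspect the first article's metadata fields.
for article in dataset:
    print(article["accession_id"], article["license"])
    break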