# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PMC Open Access Subset."""

import datetime

import pandas as pd

import datasets
from datasets.tasks import LanguageModeling


# TODO: Add BibTeX citation
# Find, for instance, the citation on arXiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.

Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However, articles
in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more
liberal redistribution and reuse than a traditional copyrighted work.

The PMC Open Access Subset is one part of the PMC Article Datasets.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

_URL = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/{subset}/txt/"
_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2022-12-17"
_BASELINE_MAX_RANGE = 10
_BASELINE_RANGES = {
    "commercial": range(_BASELINE_MAX_RANGE),
    "non_commercial": range(1, _BASELINE_MAX_RANGE),  # non-commercial PMC000xxxxxx baseline does not exist
    "other": range(_BASELINE_MAX_RANGE),
}


class OpenAccessConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.

        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(
            name="+".join(subsets),
            **kwargs,
        )
        self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())
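
# Illustrative: OpenAccessConfig(subsets=["commercial", "other"]) yields a config named
# "commercial+other"; the name "all" expands to every subset in _SUBSETS.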


class OpenAccess(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessConfig
    BUILDER_CONFIGS = [OpenAccessConfig(subsets="all")] + [OpenAccessConfig(subsets=subset) for subset in _SUBSETS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "accession_id": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[LanguageModeling(text_column="text")],
        )

    def _split_generators(self, dl_manager):
        paths = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_txt."
            # Baselines: bulk snapshots (one per PMC00{i}xxxxxx accession block) published on _BASELINE_DATE
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in _BASELINE_RANGES[subset]]
            baseline_urls = [
                (f"{url}{basename}{baseline}.filelist.csv", f"{url}{basename}{baseline}.tar.gz")
                for baseline in baselines
            ]
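            # For example, with subset="commercial" the first pair above points to
            # .../oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2022-12-17.filelist.csv
            # and .../oa_comm_txt.PMC000xxxxxx.baseline.2022-12-17.tar.gz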
            # Incremental updates: one file-list/archive pair per day, from the day after
            # _BASELINE_DATE up to and including today
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            incremental_urls = [
                (f"{url}{basename}{incremental}.filelist.csv", f"{url}{basename}{incremental}.tar.gz")
                for incremental in incrementals
            ]
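            # Each entry pairs a file-list CSV with its tar.gz archive; download() preserves
            # the nested (csv, archive) tuple structure when returning local paths.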
            paths += dl_manager.download(baseline_urls + incremental_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths": [(file_list, dl_manager.iter_archive(archive)) for file_list, archive in paths],
                },
            ),
        ]

    def _generate_examples(self, paths):
        key = 0
        for file_list, archive in paths:
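            # Index the CSV by "Article File" so each member of the tar archive can be matched
            # to its metadata row by path.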
            file_list_data = pd.read_csv(file_list, index_col="Article File").to_dict(orient="index")
            for path, file in archive:
                metadata = file_list_data.pop(path)
                content = file.read()
                try:
                    text = content.decode("utf-8").strip()
                except UnicodeDecodeError:
                    # Some articles are not valid UTF-8; fall back to latin-1, which decodes any byte sequence
                    text = content.decode("latin-1").strip()
                data = {
                    "text": text,
                    "pmid": metadata["PMID"],
                    "accession_id": metadata["AccessionID"],
                    "license": metadata["License"],
                    "last_updated": metadata["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                    "retracted": metadata["Retracted"],
                    "citation": metadata["Article Citation"],
                }
                yield key, data
                key += 1
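

# Usage sketch (illustrative; assumes this file is saved locally, e.g. as "open_access.py",
# and a `datasets` version that still supports loading scripts):
#
#     import datasets
#
#     ds = datasets.load_dataset("open_access.py", "commercial", split="train", streaming=True)
#     for example in ds:
#         print(example["accession_id"], example["license"])
#         break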