Commit 3568620
Parent(s): 71d1212

Request data URLs dynamically (#16)

- Request data URLs dynamically (8514e48cb751b21da108a41b6c3fc2e3326b9228)
- open_access.py: +44 -28

open_access.py CHANGED
@@ -15,7 +15,9 @@
 """PMC Open Access Subset."""
 
 import datetime
+from functools import lru_cache
 
+import fsspec
 import pandas as pd
 
 import datasets
@@ -52,28 +54,52 @@ _SUBSETS = {
     "non_commercial": "oa_noncomm",
     "other": "oa_other",
 }
-
-
-
-
-
-
-
+
+
+@lru_cache(maxsize=None)
+def request_data_urls():
+    fs = fsspec.filesystem("https")
+    result = {}
+    for subset, subset_url in _SUBSETS.items():
+        urls = fs.ls(_URL.format(subset=subset_url), detail=False)
+        baseline_urls = [
+            url for url in urls for filename in url.split("/")[-1:] if filename.startswith(f"{subset_url}_txt.PMC")
+        ]
+        baseline_date = parse_date(baseline_urls[0])
+        baseline_file_list_urls = [url for url in baseline_urls if url.endswith(".csv")]
+        baseline_archive_urls = [url for url in baseline_urls if url.endswith(".tar.gz")]
+        incremental_urls = [
+            url for url in urls for filename in url.split("/")[-1:] if filename.startswith(f"{subset_url}_txt.incr.")
+        ]
+        incremental_file_list_urls = [url for url in incremental_urls if url.endswith(".csv")]
+        incremental_archive_urls = [url for url in incremental_urls if url.endswith(".tar.gz")]
+        result["baseline_date"] = baseline_date
+        result[subset] = {
+            "baseline_urls": list(zip(baseline_file_list_urls, baseline_archive_urls)),
+            "incremental_urls": list(zip(incremental_file_list_urls, incremental_archive_urls)),
+        }
+    return result
+
+
+def parse_date(url):
+    return url.split("/")[-1].split(".")[-3]
 
 
 class OpenAccessConfig(datasets.BuilderConfig):
     """BuilderConfig for the PMC Open Access Subset."""
 
-    def __init__(self, date=_BASELINE_DATE, subsets="all", **kwargs):
+    def __init__(self, date=None, subsets="all", **kwargs):
         """BuilderConfig for the PMC Open Access Subset.
 
         Args:
-            date (`str`, default _BASELINE_DATE): Up to date, in ISO format.
-            subsets (`str` or `list[str]`, default 'all'): List of subsets to load. Possible values are 'all' or any combination
+            date (`str`, default BASELINE_DATE): Up to date, in ISO format. Pass 'latest' for latest date.
+            subsets (`str` or `list[str]`, default 'all'): List of subsets to load. Possible values are 'all' or any combination
                 of {'commercial', 'non_commercial', 'other'}.
             **kwargs: Keyword arguments forwarded to `BuilderConfig`.
         """
-
+        if date is None:
+            date = request_data_urls()["baseline_date"]
+        date = datetime.date.today().isoformat() if date == "latest" else date
         subsets = [subsets] if isinstance(subsets, str) else subsets
         subsets_name = "+".join(subsets)
         name = f"{date}.{subsets_name}"
@@ -88,7 +114,7 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
     BUILDER_CONFIG_CLASS = OpenAccessConfig
     BUILDER_CONFIGS = [OpenAccessConfig(subsets="all")] + [OpenAccessConfig(subsets=subset) for subset in _SUBSETS]
-    DEFAULT_CONFIG_NAME = f"{_BASELINE_DATE}.all"
+    DEFAULT_CONFIG_NAME = f"{request_data_urls()['baseline_date']}.all"
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -111,27 +137,17 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
+        urls = request_data_urls()
+        date = datetime.date.fromisoformat(self.config.date)
         paths = []
         for subset in self.config.subsets:
-            url = _URL.format(subset=_SUBSETS[subset])
-            basename = f"{_SUBSETS[subset]}_txt."
             # Baselines
-
-            baseline_urls = [
-                (f"{url}{basename}{baseline}.filelist.csv", f"{url}{basename}{baseline}.tar.gz")
-                for baseline in baselines
-            ]
+            baseline_urls = urls[subset]["baseline_urls"]
             # Incremental
-            date_delta = datetime.date.fromisoformat(self.config.date) - datetime.date.fromisoformat(_BASELINE_DATE)
-            incremental_dates = [
-                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
-                for i in range(date_delta.days)
-            ]
-            incrementals = [f"incr.{date}" for date in incremental_dates]
             incremental_urls = [
-                (f"{url}{basename}{incremental}.filelist.csv", f"{url}{basename}{incremental}.tar.gz")
-                for incremental in incrementals
+                url_pair
+                for url_pair in urls[subset]["incremental_urls"]
+                if datetime.date.fromisoformat(parse_date(url_pair[0])) <= date
             ]
             paths += dl_manager.download(baseline_urls + incremental_urls)
 
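For context: the new helper discovers file lists and archives by listing the bulk directory over HTTP at runtime instead of enumerating hardcoded baseline names. A minimal sketch of the idea, assuming fsspec's HTTPFileSystem and the PMC bulk file naming visible in the diff (`{subset}_txt.PMC….baseline.<date>.*` and `{subset}_txt.incr.<date>.*`); the value of `_URL` is not shown in this diff, so the one below is an assumption:

```python
import fsspec

# Assumed bulk endpoint; the real _URL is defined elsewhere in open_access.py.
_URL = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/{subset}/txt/"


def parse_date(url):
    # "oa_comm_txt.PMC000xxxxxxx.baseline.2021-12-17.filelist.csv"
    #   .split(".") -> [..., "baseline", "2021-12-17", "filelist", "csv"]
    # Index [-3] is the ISO date for both *.filelist.csv and *.tar.gz names.
    return url.split("/")[-1].split(".")[-3]


fs = fsspec.filesystem("https")  # HTTPFileSystem lists links from the HTML index page
urls = fs.ls(_URL.format(subset="oa_comm"), detail=False)
baselines = [u for u in urls if u.split("/")[-1].startswith("oa_comm_txt.PMC")]
print(parse_date(baselines[0]))  # e.g. "2021-12-17"
```

Because `request_data_urls()` is wrapped in `@lru_cache(maxsize=None)`, the directory listing happens at most once per process even though it is called from `OpenAccessConfig.__init__`, the `DEFAULT_CONFIG_NAME` class attribute, and `_split_generators`.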
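End to end, the change means the default config resolves its baseline date at load time rather than from a pinned `_BASELINE_DATE`. A hypothetical usage sketch, assuming this script backs the `pmc/open_access` dataset on the Hub (the repo id is not named in this diff) and that extra keyword arguments are forwarded to `OpenAccessConfig` as usual for script-based datasets:

```python
from datasets import load_dataset

# Default config: the baseline date is discovered from the server at build time.
ds = load_dataset("pmc/open_access", subsets="commercial")

# Pin a cutoff date via the config name, or pass "latest" to include every
# incremental archive up to today.
ds = load_dataset("pmc/open_access", date="latest", subsets=["commercial", "non_commercial"])
```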