# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PMC Open Access Subset."""

import datetime
import warnings

import pandas as pd

import datasets


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.
Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However,
articles in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally
allow more liberal redistribution and reuse than a traditional copyrighted work.
The PMC Open Access Subset is one part of the PMC Article Datasets.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

_URL = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/{subset}/txt/"
_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2021-12-17"


class PmcOpenAccessConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.

        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(
            name="+".join(subsets),
            **kwargs,
        )
        self.subsets = subsets if subsets != ["all"] else list(_SUBSETS.keys())


class PmcOpenAccess(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = PmcOpenAccessConfig
    BUILDER_CONFIGS = [PmcOpenAccessConfig(subsets="all")] + [
        PmcOpenAccessConfig(subsets=subset) for subset in _SUBSETS
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        warnings.warn(
            "Dataset 'pmc_open_access' is deprecated and will be deleted. Use 'pmc/open_access' instead.",
            FutureWarning,
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "accession_id": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Accumulate paths across all requested subsets so that the "all" config
        # yields every subset, not only the first one iterated over.
        baseline_file_lists = []
        baseline_archives = []
        incremental_file_lists = []
        incremental_archives = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_txt."
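            # For reference, the URLs assembled below follow this pattern (derived
            # from _URL, _SUBSETS and _BASELINE_DATE above; shown here for the
            # "commercial" subset; the exact files present on the server may differ):
            #   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.filelist.csv
            #   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.tar.gz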
            # Baselines: nine bulk archives (PMC000xxxxxx..PMC008xxxxxx) plus their file lists
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(9)]
            for baseline in baselines:
                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
                try:
                    baseline_file_list = dl_manager.download(baseline_file_list_url)
                except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
                    continue
                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
                try:
                    baseline_archive = dl_manager.download(baseline_archive_url)
                except FileNotFoundError:
                    continue
                baseline_file_lists.append(baseline_file_list)
                baseline_archives.append(baseline_archive)

            # Incremental: one file list and one archive per day since the baseline date
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            incremental_urls = {
                "incremental_file_lists": [
                    f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
                ],
                "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
            }
            incremental_paths = dl_manager.download(incremental_urls)
            incremental_file_lists.extend(incremental_paths["incremental_file_lists"])
            incremental_archives.extend(incremental_paths["incremental_archives"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "incremental_file_lists": incremental_file_lists,
                    "incremental_archives": [
                        dl_manager.iter_archive(archive) for archive in incremental_archives
                    ],
                },
            ),
        ]

    def _generate_examples(self, baseline_file_lists, baseline_archives, incremental_file_lists, incremental_archives):
        key = 0
        # Baselines
        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            try:
                # The file list maps each archive member path to its article metadata
                baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
                for path, file in baseline_archive:
                    data = baselines.pop(path)
                    content = file.read()
                    try:
                        text = content.decode("utf-8")
                    except UnicodeDecodeError:
                        # Some articles are not valid UTF-8; fall back to Latin-1
                        text = content.decode("latin-1")
                    data = {
                        "text": text,
                        "pmid": data["PMID"],
                        "accession_id": data["AccessionID"],
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                    }
                    yield key, data
                    key += 1
            except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
                continue

        # Incrementals
        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
                incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    content = file.read()
                    try:
                        text = content.decode("utf-8")
                    except UnicodeDecodeError:
                        # Some articles are not valid UTF-8; fall back to Latin-1
                        text = content.decode("latin-1")
                    data = {
                        "text": text,
                        "pmid": data["PMID"],
                        "accession_id": data["AccessionID"],
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                    }
                    yield key, data
                    key += 1
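
# A minimal usage sketch, not part of the loading script itself. It assumes this
# file is saved locally as `pmc_open_access.py` and a `datasets` version that still
# supports script-based builders; drop `streaming=True` if your version does not
# support streaming for this script.
if __name__ == "__main__":
    from datasets import load_dataset

    # Stream the commercially licensed subset so the full archives are not
    # downloaded up front; use the config name "all" to load every subset.
    ds = load_dataset(__file__, "commercial", split="train", streaming=True)
    print(next(iter(ds)))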