|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""PMC Open Access Subset of figures with captions""" |
|
from huggingface_hub import hf_hub_url |
|
import datetime |
|
import pandas as pd |
|
import numpy as np |
|
from itertools import compress, chain |
|
from collections import defaultdict |
|
import os |
|
import re |
|
from lxml import etree |
|
import unicodedata |
|
import html |
|
import json |
|
from PIL import Image |
|
import tarfile |
|
|
|
import datasets |
|
from datasets.tasks import LanguageModeling |
|
|
|
from PIL import ImageFile

import mimetypes

# Some PMC graphics are truncated/corrupt; let Pillow load what it can
# instead of raising an error in the middle of example generation.
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
|
|
|
|
|
|
_CITATION = "" |
|
|
|
_DESCRIPTION = """\ |
|
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under |
|
license terms that allow reuse. |
|
Not all articles in PMC are available for text mining and other reuse, many have copyright protection, however articles |
|
in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more |
|
liberal redistribution and reuse than a traditional copyrighted work. |
|
The PMC Open Access Subset is one part of the PMC Article Datasets |
|
|
|
This version focus on associating the graphics of figures with their captions |
|
""" |
|
|
|
_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/" |
|
|
|
|
|
_LICENSE = """ |
|
https://www.ncbi.nlm.nih.gov/pmc/about/copyright/ |
|
|
|
Within the PMC Open Access Subset, there are three groupings: |
|
|
|
Commercial Use Allowed - CC0, CC BY, CC BY-SA, CC BY-ND licenses |
|
Non-Commercial Use Only - CC BY-NC, CC BY-NC-SA, CC BY-NC-ND licenses; and |
|
Other - no machine-readable Creative Commons license, no license, or a custom license. |
|
""" |
|
|
|
_URL_ROOT = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/" |
|
_URL = _URL_ROOT+"oa_bulk/{subset}/xml/" |
|
|
|
_SUBSETS = { |
|
"commercial": "oa_comm", |
|
"non_commercial": "oa_noncomm", |
|
"other": "oa_other", |
|
} |
|
_BASELINE_DATE = "2023-12-18" |
|
|
|
# Locates the document type declaration; anything before it is junk.
begin_doc_rgx = re.compile("""<!DOCTYPE.*""")

def clean_raw(xml_text):
    """Strip any leading garbage before the ``<!DOCTYPE`` declaration.

    Some archived XML files carry stray bytes ahead of the document type
    declaration, which breaks parsing. Text without a ``<!DOCTYPE`` marker
    is returned unchanged.
    """
    match = begin_doc_rgx.search(xml_text)
    return xml_text if match is None else xml_text[match.start():]
|
|
|
def get_extensions_for_type(general_type):
    """Yield every file extension whose MIME type falls under *general_type*.

    For example ``"image"`` yields ``".png"``, ``".jpg"``, ... based on the
    standard :mod:`mimetypes` registry.
    """
    for extension, mime_type in mimetypes.types_map.items():
        if mime_type.split("/")[0] == general_type:
            yield extension

# All extensions the ``mimetypes`` registry classifies as images.
IMAGE_EXT = list(get_extensions_for_type('image'))
|
|
|
def extract_captions(article_tree):
    """Collect figure captions and their matching graphic file names.

    For every ``<fig>`` element containing at least one ``<graphic>``, the
    first graphic's ``xlink:href`` (the image basename) is recorded together
    with the figure's full text content, HTML-unescaped and NFKD-normalised.
    Figures without a graphic are skipped.

    Returns:
        tuple: ``(figure_captions, graphic_names)`` — two parallel lists.
    """
    figure_captions = []
    graphic_names = []
    for fig_el in article_tree.xpath(".//fig"):
        graphics = fig_el.xpath(".//graphic")
        if not graphics:
            continue
        graphic_names.append(graphics[0].get("{http://www.w3.org/1999/xlink}href"))
        caption = unicodedata.normalize("NFKD", html.unescape(" ".join(fig_el.itertext())))
        figure_captions.append(caption)
    return figure_captions, graphic_names
|
|
|
class OpenAccessFigureConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.

        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        # Accept a bare string as a convenience for a one-element list.
        if isinstance(subsets, str):
            subsets = [subsets]
        super().__init__(name="+".join(subsets), **kwargs)
        # The special name "all" expands to every known subset.
        self.subsets = list(_SUBSETS.keys()) if self.name == "all" else subsets
|
|
|
class OpenAccessFigure(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset: figure graphics paired with their captions."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessFigureConfig
    BUILDER_CONFIGS = [OpenAccessFigureConfig(subsets="all")] + [
        OpenAccessFigureConfig(subsets=subset) for subset in _SUBSETS
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Declare the dataset schema and metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "accession_id": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "figure_idx": datasets.Value("int16"),
                    "figure_fn": datasets.Value("string"),
                    "figure": datasets.Image(),
                    "caption": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            # Bug fix: the template previously pointed at a "content" column
            # that does not exist in the features above; the text column of
            # this dataset is "caption".
            task_templates=[LanguageModeling(text_column="caption")],
        )

    def _split_generators(self, dl_manager):
        """Download the file lists and figure archives, then build the splits.

        Articles (rows of the concatenated file list) and their archives are
        assigned to splits by position: index i with i % 10 < 8 goes to
        train, i % 10 == 8 to test, and i % 10 == 9 to validation. The row
        masks here must stay aligned with ``archive_generator``.
        """
        # Master list mapping each accession ID to its package (.tar.gz) path.
        baseline_package_list = dl_manager.download(f"{_URL_ROOT}oa_file_list.csv")
        baseline_file_list_l = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_xml."
            # One baseline file list per PMC00ixxxxxx shard; the
            # non-commercial subset has no PMC000xxxxxx shard.
            baseline_file_list_urls = [
                f"{url}{basename}PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}.filelist.csv"
                for i in range(10)
                if (subset != "non_commercial" or i > 0)
            ]
            baseline_file_list_l.extend(dl_manager.download(baseline_file_list_urls))

        oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
        oa_package_list = oa_package_list[["File"]]
        figure_archives = []
        df_l = []
        for baseline_file_list in baseline_file_list_l:
            try:
                file_list = pd.read_csv(baseline_file_list, index_col="AccessionID")
            except FileNotFoundError:  # a shard may be missing upstream; skip it
                continue
            file_list = file_list.join(oa_package_list).reset_index().set_index("Article File")
            # Articles without a package entry would otherwise produce NaN URLs.
            file_list.File = file_list.File.fillna('')
            figure_url_l = list(_URL_ROOT + file_list.File)
            figure_archives.append(dl_manager.download(figure_url_l))
            df_l.append(file_list)

        package_df = pd.concat(df_l).reset_index()
        # Flatten per-shard archive lists; row i of package_df corresponds to
        # figure_archives[i].
        figure_archives = list(chain(*figure_archives))

        row_idx = np.arange(len(package_df))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "figure_archive_lists": self.archive_generator(dl_manager, figure_archives, "train"),
                    "package_df": package_df[row_idx % 10 < 8],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "figure_archive_lists": self.archive_generator(dl_manager, figure_archives, "test"),
                    "package_df": package_df[row_idx % 10 == 8],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "figure_archive_lists": self.archive_generator(dl_manager, figure_archives, "validation"),
                    "package_df": package_df[row_idx % 10 == 9],
                },
            ),
        ]

    def archive_generator(self, dl_manager, figure_archives, name):
        """Lazily yield archive iterators for the split *name*.

        The index selection (i % 10 < 8 / [8::10] / [9::10]) must stay
        aligned with the ``package_df`` row masks in ``_split_generators``.
        """
        if name == "train":
            for k, archive in enumerate(figure_archives):
                if k % 10 < 8:
                    yield dl_manager.iter_archive(archive)
        elif name == "test":
            for archive in figure_archives[8::10]:
                yield dl_manager.iter_archive(archive)
        elif name == "validation":
            for archive in figure_archives[9::10]:
                yield dl_manager.iter_archive(archive)

    def _generate_examples(self, figure_archive_lists, package_df):
        """Walk each article archive, pair graphics with captions, yield examples."""
        for i, figure_archive in enumerate(figure_archive_lists):
            data = package_df.iloc[i]
            f_d = defaultdict(dict)  # extension -> {basename: PIL image}
            # Bug fix: these were previously unbound when an archive had no
            # XML member, raising a NameError that the broad handler below
            # silently swallowed.
            figure_captions, graphic_names = [], []
            try:
                # First pass: find the article XML and extract the captions.
                for path, file in figure_archive:
                    _, ext = os.path.splitext(os.path.basename(path))
                    if ext in [".nxml", ".xml"]:
                        content = file.read()
                        try:
                            text = content.decode("utf-8").strip()
                        except UnicodeDecodeError:
                            # Fall back for legacy encodings.
                            text = content.decode("latin-1").strip()
                        text = clean_raw(text)
                        article_tree = etree.ElementTree(etree.fromstring(text))
                        figure_captions, graphic_names = extract_captions(article_tree)
                        break

                # Second pass: collect the graphics referenced by the figures.
                # NOTE(review): `figure_archive` is a generator, so this loop
                # resumes *after* the XML member — any images stored before
                # the XML in the tar are skipped. TODO: confirm archives
                # always store the XML first.
                for path, file in figure_archive:
                    bn, ext = os.path.splitext(os.path.basename(path))
                    if ext in IMAGE_EXT and bn in graphic_names:
                        f_d[ext][bn] = Image.open(file, mode="r")

                # When the same basename exists with several extensions,
                # prefer formats in this fixed order.
                image_d = {}
                for ext in [".tif", ".jpg", ".png", ".gif"]:
                    for bn, image in f_d[ext].items():
                        if bn not in image_d:
                            image_d[bn] = image

                for j, (caption, graph_name) in enumerate(zip(figure_captions, graphic_names)):
                    if graph_name in image_d:
                        yield (
                            f"{data['AccessionID']}_{j + 1}",
                            {
                                "figure": image_d[graph_name],
                                "caption": caption,
                                "pmid": data["PMID"],
                                "accession_id": data["AccessionID"],
                                "figure_idx": j + 1,
                                "figure_fn": graph_name,
                                "license": data["License"],
                                "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                                "retracted": data["Retracted"],
                                "citation": data["Article Citation"],
                            },
                        )
            except Exception:
                # Bug fix: was a bare `except:`. Keep the best-effort skip of
                # malformed archives, but no longer trap SystemExit /
                # KeyboardInterrupt.
                continue
|
|