"""PMC-OA Dataset"""
import os
import jsonlines
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{lin2023pmc,
title={PMC-CLIP: Contrastive Language-Image Pre-training using Biomedical Documents},
author={Lin, Weixiong and Zhao, Ziheng and Zhang, Xiaoman and Wu, Chaoyi and Zhang, Ya and Wang, Yanfeng and Xie, Weidi},
journal={arXiv preprint arXiv:2303.07240},
year={2023}
}
"""
_DESCRIPTION = """\
Foundation models trained on large-scale datasets have recently surged in CV and NLP, while progress in the biomedical domain lags far behind due to data scarcity.
To address this issue, we build and release PMC-OA, a biomedical dataset with 1.6M image-caption pairs collected from PubMed Central's Open Access subset, 8 times larger than previous biomedical image-caption datasets.
PMC-OA covers diverse modalities and diseases, with the majority of the image-caption samples aligned at a finer-grained level, i.e., subfigure and subcaption.
By pre-training a CLIP-style model on PMC-OA, our model, PMC-CLIP, achieves state-of-the-art results on various downstream tasks,
including image-text retrieval on ROCO, MedMNIST image classification, and Medical VQA, e.g., +8.1% R@10 on image-text retrieval and +3.9% accuracy on image classification.
"""
_HOMEPAGE = "https://weixionglin.github.io/PMC-CLIP/"
_URLs = {
"images": "https://huggingface.co/datasets/axiong/pmc_oa/resolve/main/images.zip",
"pmc_oa_beta": "https://huggingface.co/datasets/axiong/pmc_oa/resolve/main/pmc_oa_beta.jsonl",
"pmc_oa": "https://huggingface.co/datasets/axiong/pmc_oa/resolve/main/pmc_oa.jsonl",
}


class PMC_OA_Config(datasets.BuilderConfig):
    """BuilderConfig for PMC_OA."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PMC_OA_Config, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class PMC_OA(datasets.GeneratorBasedBuilder):
    """PMC_OA Dataset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        PMC_OA_Config(
            name="pmc_oa_beta",
            description="<subfigure, caption> pairs. Subfigures detected by a DETR model.",
        ),
        PMC_OA_Config(
            name="pmc_oa",
            description="<subfigure, subcaption> pairs. Subfigures detected by a DETR model. Subcaptions detected by ChatGPT and aligned with subfigures.",
        ),
    ]

    def _info(self):
        if self.config.name == "pmc_oa_beta":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        # The generator yields {"path": ..., "bytes": ...} dicts,
                        # which is the encoding expected by the Image feature type.
                        "image": datasets.Image(),
                        "caption": datasets.Value("string"),
                    }
                ),
                supervised_keys=None,
                citation=_CITATION,
                homepage=_HOMEPAGE,
            )
        elif self.config.name == "pmc_oa":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        "image": datasets.Image(),
                        "caption": datasets.Value("string"),
                        "alignment_type": datasets.Value("string"),
                        "alignment_score": datasets.Value("float"),
                    }
                ),
                supervised_keys=None,
                citation=_CITATION,
                homepage=_HOMEPAGE,
            )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download_and_extract(_URLs)
        if self.config.name == "pmc_oa_beta":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": downloaded_files["pmc_oa_beta"], "image_dir": downloaded_files["images"]},
                )
            ]
        elif self.config.name == "pmc_oa":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": downloaded_files["pmc_oa"], "image_dir": downloaded_files["images"]},
                )
            ]

    def _generate_examples(self, filepath, image_dir):
        """Yields examples."""
        logger.info("generating examples from = %s", filepath)
        with jsonlines.open(filepath) as reader:
            for _id, obj in enumerate(reader):
                relative_image_path = obj["image"]
                image_path = os.path.join(image_dir, "caption_T060_filtered_top4_sep_v0_subfigures", relative_image_path)
                caption = obj["caption"]
                # Read the image bytes with a context manager so the file handle is closed.
                with open(image_path, "rb") as f:
                    image_bytes = f.read()
                if self.config.name == "pmc_oa_beta":
                    yield _id, {
                        "image": {"path": image_path, "bytes": image_bytes},
                        "caption": caption,
                    }
                elif self.config.name == "pmc_oa":
                    yield _id, {
                        "image": {"path": image_path, "bytes": image_bytes},
                        "caption": caption,
                        "alignment_type": obj["alignment_type"],
                        "alignment_score": obj["alignment_score"],
                    }
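

# Minimal usage sketch (an illustrative assumption, not part of the loading script
# itself): this file is hosted as the loading script of the `axiong/pmc_oa` dataset
# repository referenced by the URLs above. Recent `datasets` versions may also
# require `trust_remote_code=True` when loading script-based datasets.
if __name__ == "__main__":
    dataset = datasets.load_dataset("axiong/pmc_oa", name="pmc_oa", split="train")
    print(dataset[0]["caption"])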