"""
BIOSCAN-5M Dataset Loader
Author: Zahra Gharaee (https://github.com/zahrag)
License: MIT License
Description:
This custom dataset loader provides structured access to the BIOSCAN-5M dataset,
which includes millions of annotated insect images and associated metadata
for machine learning and biodiversity research. It supports multiple image packages
(cropped and original, each at 256-pixel resolution) and predefined splits for training, evaluation,
and pretraining. The loader integrates with the Hugging Face `datasets` library
to simplify data access and preparation.
Usage
To load the dataset from dataset.py:
from datasets import load_dataset
ds = load_dataset("dataset.py", name="cropped_256_eval", split="validation", trust_remote_code=True)
"""
import os
import csv
import datasets
import json
_CITATION = """\n----Citation:\n@inproceedings{gharaee2024bioscan5m,
title={{BIOSCAN-5M}: A Multimodal Dataset for Insect Biodiversity},
booktitle={Advances in Neural Information Processing Systems},
author={Zahra Gharaee and Scott C. Lowe and ZeMing Gong and Pablo Millan Arias
and Nicholas Pellegrino and Austin T. Wang and Joakim Bruslund Haurum
and Iuliia Zarubiieva and Lila Kari and Dirk Steinke and Graham W. Taylor
and Paul Fieguth and Angel X. Chang},
editor={A. Globerson and L. Mackey and D. Belgrave and A. Fan and U. Paquet and J. Tomczak and C. Zhang},
pages={36285--36313},
publisher={Curran Associates, Inc.},
year={2024},
volume={37},
url={https://proceedings.neurips.cc/paper_files/paper/2024/file/3fdbb472813041c9ecef04c20c2b1e5a-Paper-Datasets_and_Benchmarks_Track.pdf}
}\n"""
_DESCRIPTION = (
"\n----Description:\n'BIOSCAN-5M' is a comprehensive multimodal dataset containing data for over 5 million insect specimens.\n"
"Released in 2024, this dataset substantially enhances existing image-based biological resources by incorporating:\n"
"- Taxonomic labels\n- Raw nucleotide barcode sequences \n- Assigned barcode index numbers\n- Geographical information\n"
"- Specimen size information\n\n"
"-------------- Dataset Feature Descriptions --------------\n"
"1- processid: A unique number assigned by BOLD (International Barcode of Life Consortium).\n"
"2- sampleid: A unique identifier given by the collector.\n"
"3- taxon: Bio.info: Most specific taxonomy rank.\n"
"4- phylum: Bio.info: Taxonomic classification label at phylum rank.\n"
"5- class: Bio.info: Taxonomic classification label at class rank.\n"
"6- order: Bio.info: Taxonomic classification label at order rank.\n"
"7- family: Bio.info: Taxonomic classification label at family rank.\n"
"8- subfamily: Bio.info: Taxonomic classification label at subfamily rank.\n"
"9- genus: Bio.info: Taxonomic classification label at genus rank.\n"
"10- species: Bio.info: Taxonomic classification label at species rank.\n"
"11- dna_bin: Bio.info: Barcode Index Number (BIN).\n"
"12- dna_barcode: Bio.info: Nucleotide barcode sequence.\n"
"13- country: Geo.info: Country associated with the site of collection.\n"
"14- province_state: Geo.info: Province/state associated with the site of collection.\n"
"15- coord-lat: Geo.info: Latitude (WGS 84; decimal degrees) of the collection site.\n"
"16- coord-lon: Geo.info: Longitude (WGS 84; decimal degrees) of the collection site.\n"
"17- image_measurement_value: Size.info: Number of pixels occupied by the organism.\n"
"18- area_fraction: Size.info: Fraction of the original image the cropped image comprises.\n"
"19- scale_factor: Size.info: Ratio of the cropped image to the cropped_256 image.\n"
"20- inferred_ranks: An integer indicating at which taxonomic ranks the label is inferred.\n"
"21- split: Split set (partition) the sample belongs to.\n"
"22- index_bioscan_1M_insect: An index to locate organism in BIOSCAN-1M Insect metadata.\n"
"23- chunk: The packaging subdirectory name (or empty string) for this image.\n"
)
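# Illustrative record (hypothetical values, fabricated only to show field formats; real
# records come from the metadata archive downloaded in _split_generators):
#   processid="ABCDE123-21", taxon="Sciaridae", order="Diptera", family="Sciaridae",
#   species="", dna_bin="BOLD:AAA0000", coord-lat=43.5, coord-lon=-80.2,
#   split="pretrain", chunk="0"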
_LICENSE = "\n----License:\nCC BY 3.0: Creative Commons Attribution 3.0 Unported (https://creativecommons.org/licenses/by/3.0/)\n"
SUPPORTED_FORMATS = {"csv": "csv", "jsonld": "jsonld"}
SUPPORTED_PACKAGES = {
"original_256": "BIOSCAN_5M_original_256.zip",
"original_256_pretrain": "BIOSCAN_5M_original_256_pretrain.zip",
"original_256_train": "BIOSCAN_5M_original_256_train.zip",
"original_256_eval": "BIOSCAN_5M_original_256_eval.zip",
"cropped_256": "BIOSCAN_5M_cropped_256.zip",
"cropped_256_pretrain": "BIOSCAN_5M_cropped_256_pretrain.zip",
"cropped_256_train": "BIOSCAN_5M_cropped_256_train.zip",
"cropped_256_eval": "BIOSCAN_5M_cropped_256_eval.zip",
}
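# Archive names follow BIOSCAN_5M_<imgtype>[_<split_group>].zip: the "original_256" and
# "cropped_256" packages contain every partition, while the *_pretrain, *_train and *_eval
# packages bundle only the corresponding partitions.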
def safe_cast(value, cast_type):
try:
return cast_type(value) if value else None
except ValueError:
return None
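# Behaviour of safe_cast, for reference:
#   safe_cast("3.5", float)  -> 3.5
#   safe_cast("", float)     -> None   (empty strings are treated as missing values)
#   safe_cast("abc", int)    -> None   (unparseable values are treated as missing)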
def extract_info_from_filename(package_name):
"""
Extract imgtype and split_name using string ops.
Assumes package_name format: BIOSCAN_5M_<imgtype>[_<split_name>].zip
"""
if package_name not in SUPPORTED_PACKAGES.values():
raise ValueError(
f"Unsupported package: {package_name}\n"
f"Supported packages are:\n - " + "\n - ".join(sorted(SUPPORTED_PACKAGES.values()))
)
# Remove prefix and suffix
core = package_name.replace("BIOSCAN_5M_", "").replace(".zip", "")
parts = core.split("_")
if len(parts) == 2:
imgtype = "_".join(parts)
data_split = "full"
elif len(parts) == 3:
imgtype = "_".join(parts[:2])
data_split = parts[2]
else:
imgtype, data_split = None, None # Unexpected format
return imgtype, data_split
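# Example mapping, for reference:
#   extract_info_from_filename("BIOSCAN_5M_cropped_256_eval.zip") -> ("cropped_256", "eval")
#   extract_info_from_filename("BIOSCAN_5M_original_256.zip")     -> ("original_256", "full")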
class BIOSCAN5MConfig(datasets.BuilderConfig):
def __init__(self, metadata_format="csv", package_name="BIOSCAN_5M_cropped_256.zip", **kwargs):
super().__init__(**kwargs)
self.metadata_format = metadata_format
self.package_name = package_name
class BIOSCAN5M(datasets.GeneratorBasedBuilder):
"""Custom dataset loader for BIOSCAN-5M (images + metadata)."""
BUILDER_CONFIGS = [
BIOSCAN5MConfig(
name="cropped_256_eval",
version=datasets.Version("0.0.0"),
description="Cropped_256 images for evaluation splits.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["cropped_256_eval"],
),
BIOSCAN5MConfig(
name="cropped_256_train",
version=datasets.Version("0.0.0"),
description="Cropped_256 images for training split.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["cropped_256_train"],
),
BIOSCAN5MConfig(
name="cropped_256_pretrain",
version=datasets.Version("0.0.0"),
description="Cropped images for pretraining split.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["cropped_256_pretrain"],
),
BIOSCAN5MConfig(
name="cropped_256",
version=datasets.Version("0.0.0"),
description="Cropped_256 images for full splits.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["cropped_256"],
),
BIOSCAN5MConfig(
name="original_256_eval",
version=datasets.Version("0.0.0"),
description="Original_256 images for evaluation splits.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["original_256_eval"],
),
BIOSCAN5MConfig(
name="original_256_train",
version=datasets.Version("0.0.0"),
description="Original_256 images for training split.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["original_256_train"],
),
BIOSCAN5MConfig(
name="original_256_pretrain",
version=datasets.Version("0.0.0"),
description="Original images for pretraining split.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["original_256_pretrain"],
),
BIOSCAN5MConfig(
name="original_256",
version=datasets.Version("0.0.0"),
description="Original_256 images for full splits.",
metadata_format=SUPPORTED_FORMATS["csv"],
package_name=SUPPORTED_PACKAGES["original_256"],
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"image": datasets.Image(),
"processid": datasets.Value("string"),
"sampleid": datasets.Value("string"),
"taxon": datasets.Value("string"),
"phylum": datasets.Value("string"),
"class": datasets.Value("string"),
"order": datasets.Value("string"),
"family": datasets.Value("string"),
"subfamily": datasets.Value("string"),
"genus": datasets.Value("string"),
"species": datasets.Value("string"),
"dna_bin": datasets.Value("string"),
"dna_barcode": datasets.Value("string"),
"country": datasets.Value("string"),
"province_state": datasets.Value("string"),
"coord-lat": datasets.Value("float"),
"coord-lon": datasets.Value("float"),
"image_measurement_value": datasets.Value("int64"),
"area_fraction": datasets.Value("float"),
"scale_factor": datasets.Value("float"),
"inferred_ranks": datasets.Value("int32"),
"split": datasets.Value("string"),
"index_bioscan_1M_insect": datasets.Value("int32"),
"chunk": datasets.Value("string"),
}),
supervised_keys=None,
homepage="https://huggingface.co/datasets/bioscan-ml/BIOSCAN-5M",
citation=_CITATION,
            license=_LICENSE,
)
    def _split_generators(self, dl_manager, **kwargs):
"""Custom dataset split generator"""
metadata_format = self.config.metadata_format
package_name = self.config.package_name
imgtype, data_split = extract_info_from_filename(package_name)
# Download metadata
metadata_url = "https://huggingface.co/datasets/bioscan-ml/BIOSCAN-5M/resolve/main/BIOSCAN_5M_Insect_Dataset_metadata_MultiTypes.zip"
metadata_archive = dl_manager.download_and_extract(metadata_url)
metadata_file = os.path.join(
metadata_archive,
f"bioscan5m/metadata/{metadata_format}/BIOSCAN_5M_Insect_Dataset_metadata.{metadata_format}"
)
# Download image archives
image_url = f"https://huggingface.co/datasets/bioscan-ml/BIOSCAN-5M/resolve/main/{package_name}"
image_archives = dl_manager.download_and_extract([image_url])
        image_dirs = list(image_archives)
# Define all available splits
eval_splits = [
"val", "test", "val_unseen", "test_unseen", "key_unseen", "other_heldout"
]
splits = ["pretrain", "train"] + eval_splits
hf_splits = {
"train": datasets.Split.TRAIN,
"val": datasets.Split.VALIDATION,
"test": datasets.Split.TEST,
}
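        # Split names with a canonical Hugging Face counterpart ("train", "val", "test") are
        # mapped via hf_splits.get(split, split); the remaining evaluation partitions
        # (e.g. "val_unseen", "key_unseen") keep their original names. For an *_eval config
        # this yields: validation, test, val_unseen, test_unseen, key_unseen, other_heldout.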
if data_split == "full": # All partitions
return [
datasets.SplitGenerator(
name=hf_splits.get(split, split),
gen_kwargs={
"metadata_path": metadata_file,
"image_dirs": image_dirs,
"split": split,
"imgtype": imgtype,
},
)
for split in splits
]
elif data_split == "eval": # Evaluation partitions
return [
datasets.SplitGenerator(
name=hf_splits.get(split, split),
gen_kwargs={
"metadata_path": metadata_file,
"image_dirs": image_dirs,
"split": split,
"imgtype": imgtype,
},
)
for split in eval_splits
]
else: # train and pretrain partitions
return [
datasets.SplitGenerator(
name=hf_splits.get(data_split, data_split),
gen_kwargs={
"metadata_path": metadata_file,
"image_dirs": image_dirs,
"split": data_split,
"imgtype": imgtype,
},
)
]
def _generate_examples(self, metadata_path, image_dirs, split, imgtype):
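        # Yields (index, example) pairs for the requested split, reading metadata from CSV or JSON-LD.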
if metadata_path.endswith(".csv"):
with open(metadata_path, encoding="utf-8") as f:
reader = csv.DictReader(f)
for idx, row in enumerate(reader):
if row["split"] != split:
continue # Skip others and keep the chosen split samples
processid = row["processid"]
chunk = row.get("chunk", "").strip() if row.get("chunk") else ""
# Construct expected relative path
if chunk == "":
rel_path = f"bioscan5m/images/{imgtype}/{split}/{processid}.jpg"
else:
rel_path = f"bioscan5m/images/{imgtype}/{split}/{chunk}/{processid}.jpg"
# Search for the image file inside extracted image_dirs
image_path = None
for image_dir in image_dirs:
potential_path = os.path.join(image_dir, rel_path)
if os.path.exists(potential_path):
image_path = potential_path
break # Image found; end search
if image_path is None:
print(f" ---- Image NOT Found! ---- \n{potential_path}")
continue
yield idx, {
"image": image_path,
"processid": row["processid"],
"sampleid": row["sampleid"],
"taxon": row["taxon"],
"phylum": row["phylum"] or None,
"class": row["class"] or None,
"order": row["order"] or None,
"family": row["family"] or None,
"subfamily": row["subfamily"] or None,
"genus": row["genus"] or None,
"species": row["species"] or None,
"dna_bin": row["dna_bin"] or None,
"dna_barcode": row["dna_barcode"],
"country": row["country"] or None,
"province_state": row["province_state"] or None,
"coord-lat": safe_cast(row["coord-lat"], float),
"coord-lon": safe_cast(row["coord-lon"], float),
"image_measurement_value": safe_cast(row["image_measurement_value"], float),
"area_fraction": safe_cast(row["area_fraction"], float),
"scale_factor": safe_cast(row["scale_factor"], float),
"inferred_ranks": safe_cast(row["inferred_ranks"], int),
"split": row["split"],
"index_bioscan_1M_insect": safe_cast(row["index_bioscan_1M_insect"], float),
"chunk": row["chunk"] or None,
}
elif metadata_path.endswith(".jsonld"):
with open(metadata_path, encoding="utf-8") as f:
metadata = json.load(f)
for idx, row in enumerate(metadata):
if row["split"] != split:
continue # Skip others and keep the chosen split samples
processid = row["processid"]
chunk = row.get("chunk", "").strip() if row.get("chunk") else ""
# Construct expected relative path
if chunk == "":
rel_path = f"bioscan5m/images/{imgtype}/{split}/{processid}.jpg"
else:
rel_path = f"bioscan5m/images/{imgtype}/{split}/{chunk}/{processid}.jpg"
# Search for the image file inside extracted image_dirs
image_path = None
for image_dir in image_dirs:
potential_path = os.path.join(image_dir, rel_path)
if os.path.exists(potential_path):
image_path = potential_path
break # Image found; end search
if image_path is None:
print(f" ---- Image NOT Found! ---- \n{potential_path}")
continue
yield idx, {
"image": image_path,
"processid": row["processid"],
"sampleid": row["sampleid"],
"taxon": row["taxon"],
"phylum": row["phylum"] or None,
"class": row["class"] or None,
"order": row["order"] or None,
"family": row["family"] or None,
"subfamily": row["subfamily"] or None,
"genus": row["genus"] or None,
"species": row["species"] or None,
"dna_bin": row["dna_bin"] or None,
"dna_barcode": row["dna_barcode"],
"country": row["country"] or None,
"province_state": row["province_state"] or None,
"coord-lat": safe_cast(row["coord-lat"], float),
"coord-lon": safe_cast(row["coord-lon"], float),
"image_measurement_value": safe_cast(row["image_measurement_value"], float),
"area_fraction": safe_cast(row["area_fraction"], float),
"scale_factor": safe_cast(row["scale_factor"], float),
"inferred_ranks": safe_cast(row["inferred_ranks"], int),
"split": row["split"],
"index_bioscan_1M_insect": safe_cast(row["index_bioscan_1M_insect"], float),
"chunk": row["chunk"] or None,
}
else:
raise ValueError(
f"Unsupported format: {os.path.splitext(metadata_path.lower())[1]}\n"
f"Supported formats are:\n - " + "\n - ".join(sorted(SUPPORTED_FORMATS.values()))
)