|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""NLS Chapbook Illustrations""" |
|
|
|
import collections |
|
import json |
|
import os |
|
from typing import Any, Dict, List |
|
import pandas as pd |
|
import datasets |
|
|
|
_CITATION = """@inproceedings{10.1145/3476887.3476893, |
|
author = {Dutta, Abhishek and Bergel, Giles and Zisserman, Andrew}, |
|
title = {Visual Analysis of Chapbooks Printed in Scotland}, |
|
year = {2021}, |
|
isbn = {9781450386906}, |
|
publisher = {Association for Computing Machinery}, |
|
address = {New York, NY, USA}, |
|
url = {https://doi.org/10.1145/3476887.3476893}, |
|
doi = {10.1145/3476887.3476893}, |
|
abstract = {Chapbooks were short, cheap printed booklets produced in large quantities in Scotland, England, Ireland, North America and much of Europe between roughly the seventeenth and nineteenth centuries. A form of popular literature containing songs, stories, poems, games, riddles, religious writings and other content designed to appeal to a wide readership, they were frequently illustrated, particularly on their title-pages. This paper describes the visual analysis of such chapbook illustrations. We automatically extract all the illustrations contained in the National Library of Scotland Chapbooks Printed in Scotland dataset, and create a visual search engine to search this dataset using full or part-illustrations as queries. We also cluster these illustrations based on their visual content, and provide keyword-based search of the metadata associated with each publication. The visual search; clustering of illustrations based on visual content; and metadata search features enable researchers to forensically analyse the chapbooks dataset and to discover unnoticed relationships between its elements. We release all annotations and software tools described in this paper to enable reproduction of the results presented and to allow extension of the methodology described to datasets of a similar nature.}, |
|
booktitle = {The 6th International Workshop on Historical Document Imaging and Processing}, |
|
pages = {67–72}, |
|
numpages = {6}, |
|
keywords = {illustration detection, chapbooks, image search, visual grouping, printing, digital scholarship, illustration dataset}, |
|
location = {Lausanne, Switzerland}, |
|
series = {HIP '21} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = "This dataset comprises of images from chapbooks held by the National Library of Scotland and digitised and published as its Chapbooks Printed in Scotland dataset" |
|
|
|
|
|
_HOMEPAGE = "https://www.robots.ox.ac.uk/~vgg/research/chapbooks/" |
|
|
|
|
|
_LICENSE = "Public Domain Mark 1.0" |
|
|
|
|
|
_IMAGES_URL = "https://nlsfoundry.s3.amazonaws.com/data/nls-data-chapbooks.zip" |
|
|
|
|
|
_ANNOTATIONS_URL = "https://gitlab.com/davanstrien/nls-chapbooks-illustrations/-/raw/master/data/annotations/step5-manual-verification-image-0-47329_train_coco.json" |
|
|
|
|
|
class NationalLibraryScotlandChapBooksConfig(datasets.BuilderConfig):
    """BuilderConfig for the National Library of Scotland Chapbooks dataset.

    Thin wrapper around ``datasets.BuilderConfig`` that pins the dataset
    version and description so each named config only has to supply its name.
    """

    def __init__(self, name, **kwargs):
        """Create a config.

        Args:
            name: Config name (e.g. ``"illustration-detection"``).
            **kwargs: Forwarded to ``datasets.BuilderConfig``.
        """
        # Zero-argument super() is the Python 3 idiom; behavior is identical
        # to the old explicit super(Class, self) form.
        super().__init__(
            version=datasets.Version("1.0.0"),
            name=name,
            description="NLS Chapbook Illustrations",
            **kwargs,
        )
|
|
|
|
|
class NationalLibraryScotlandChapBooks(datasets.GeneratorBasedBuilder):
    """National Library of Scotland Chapbooks dataset.

    Exposes three configurations built from the same digitised image archive:

    - ``illustration-detection``: COCO-style object detection (bbox,
      segmentation, area, iscrowd) for printed illustrations.
    - ``image-classification``: binary label — a page is ``illustrated``
      when it has at least one detection annotation, else ``not-illustrated``.
    - ``image-matching``: integer visual-group labels read from a CSV; only
      pages present (and unambiguous) in the CSV are emitted.
    """

    BUILDER_CONFIGS = [
        NationalLibraryScotlandChapBooksConfig("illustration-detection"),
        NationalLibraryScotlandChapBooksConfig("image-classification"),
        NationalLibraryScotlandChapBooksConfig("image-matching"),
    ]

    def _info(self):
        """Return the ``DatasetInfo`` whose feature schema depends on the
        active config name."""
        if self.config.name == "illustration-detection":
            features = datasets.Features(
                {
                    "image_id": datasets.Value("int64"),
                    "image": datasets.Image(),
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                }
            )
            # One entry per COCO annotation attached to the image.
            object_dict = {
                "category_id": datasets.ClassLabel(
                    names=["early_printed_illustration"]
                ),
                "image_id": datasets.Value("string"),
                "id": datasets.Value("int64"),
                "area": datasets.Value("int64"),
                "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                "segmentation": [[datasets.Value("float32")]],
                "iscrowd": datasets.Value("bool"),
            }
            features["objects"] = [object_dict]
        if self.config.name == "image-classification":
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(
                        num_classes=2, names=["not-illustrated", "illustrated"]
                    ),
                }
            )
        if self.config.name == "image-matching":
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "group-label": datasets.Value("int32"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and annotations; yield one TRAIN split."""
        images = dl_manager.download_and_extract(_IMAGES_URL)
        annotations = dl_manager.download(_ANNOTATIONS_URL)
        # NOTE(review): this is a bare local filename, not a URL — presumably
        # the CSV is expected to ship alongside this script. Confirm; it is
        # also downloaded for every config, not just "image-matching".
        image_match_annotations = dl_manager.download(
            "illustration-group-specifications[83].csv"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # `annotations` is already a single local path;
                    # os.path.join() on one argument was a no-op.
                    "annotations_file": annotations,
                    "image_dir": os.path.join(images, "nls-data-chapbooks"),
                    "image_match_annotations": image_match_annotations,
                },
            )
        ]

    def _get_image_id_to_annotations_mapping(
        self, annotations: List[Dict]
    ) -> Dict[int, List[Dict[Any, Any]]]:
        """Build a mapping from image id to the list of its annotations.

        Args:
            annotations: COCO-style annotation dicts, each with an
                ``"image_id"`` key.

        Returns:
            ``{image_id: [annotation, ...]}``; ids absent from *annotations*
            map to an empty list (defaultdict semantics).
        """
        image_id_to_annotations = collections.defaultdict(list)
        for annotation in annotations:
            image_id_to_annotations[annotation["image_id"]].append(annotation)
        return image_id_to_annotations

    def _generate_examples(self, annotations_file, image_dir, image_match_annotations):
        """Yield ``(key, example)`` pairs for the active config.

        Args:
            annotations_file: Path to the COCO JSON annotations.
            image_dir: Root directory of the extracted image archive.
            image_match_annotations: Path to the group-specification CSV
                (used only by the ``image-matching`` config).
        """

        def _image_info_to_example(image_info, image_dir):
            # Base detection example: image path plus COCO image metadata.
            image = image_info["file_name"]
            return {
                "image_id": image_info["id"],
                "image": os.path.join(image_dir, image),
                "width": image_info["width"],
                "height": image_info["height"],
            }

        with open(annotations_file, encoding="utf8") as f:
            annotation_data = json.load(f)
        images = annotation_data["images"]
        annotations = annotation_data["annotations"]
        image_id_to_annotations = self._get_image_id_to_annotations_mapping(
            annotations
        )

        if self.config.name == "illustration-detection":
            for idx, image_info in enumerate(images):
                example = _image_info_to_example(image_info, image_dir)
                # Use a distinct name: the original rebound the outer
                # `annotations` list here, which was confusing.
                image_annotations = image_id_to_annotations[image_info["id"]]
                objects = []
                for annotation in image_annotations:
                    # Remap COCO category id 1 to ClassLabel index 0
                    # ("early_printed_illustration").
                    category_id = annotation["category_id"]
                    if category_id == 1:
                        annotation["category_id"] = 0
                    objects.append(annotation)
                example["objects"] = objects
                yield (idx, example)
        if self.config.name == "image-classification":
            for idx, image_info in enumerate(images):
                image_annotations = image_id_to_annotations[image_info["id"]]
                # Any annotation at all means the page is "illustrated".
                label = 0 if len(image_annotations) < 1 else 1
                example = {
                    "image": os.path.join(image_dir, image_info["file_name"]),
                    "label": label,
                }

                yield (idx, example)
        if self.config.name == "image-matching":
            df = pd.read_csv(image_match_annotations, dtype={"file_id": "string"})
            # keep=False drops *every* row of a duplicated filename — an
            # ambiguous filename gets no group label at all.
            df = df.drop_duplicates(subset=["filename"], keep=False)
            df = df.set_index("filename", drop=True)
            mapping = df.to_dict("index")
            for idx, image_info in enumerate(images):
                filename = image_info["file_name"]
                match = mapping.get(filename)
                if match:
                    match = match["set_id"]
                    example = {
                        "image": os.path.join(image_dir, image_info["file_name"]),
                        "group-label": match,
                    }
                    yield (idx, example)
|
|