"""Localized Narratives""" |
|
import json |
|
import datasets |
|
|
|
|
|
_CITATION = """ |
|
@inproceedings{PontTuset_eccv2020, |
|
author = {Jordi Pont-Tuset and Jasper Uijlings and Soravit Changpinyo and Radu Soricut and Vittorio Ferrari}, |
|
title = {Connecting Vision and Language with Localized Narratives}, |
|
booktitle = {ECCV}, |
|
year = {2020} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """ |
|
Localized Narratives, a new form of multimodal image annotations connecting vision and language. |
|
We ask annotators to describe an image with their voice while simultaneously hovering their mouse over the region they are describing. |
|
Since the voice and the mouse pointer are synchronized, we can localize every single word in the description. |
|
This dense visual grounding takes the form of a mouse trace segment per word and is unique to our data. |
|
We annotated 849k images with Localized Narratives: the whole COCO, Flickr30k, and ADE20K datasets, and 671k images of Open Images, all of which we make publicly available. |
|
""" |
|
|
|
_HOMEPAGE = "https://google.github.io/localized-narratives/" |
|
|
|
_LICENSE = "CC BY 4.0" |
|
|
|
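# Open Images annotations of Localized Narratives. Each file is in JSON Lines format:
# one localized narrative per line, parsed by the example generators below. The train
# split is sharded into ten files.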
_ANNOTATION_URLs = {
    "train": [
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00000-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00001-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00002-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00003-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00004-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00005-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00006-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00007-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00008-of-00010.jsonl",
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_localized_narratives-00009-of-00010.jsonl",
    ],
    "validation": [
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_validation_localized_narratives.jsonl"
    ],
    "test": [
        "https://storage.googleapis.com/localized-narratives/annotations/open_images_test_localized_narratives.jsonl"
    ],
}

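# Feature schemas for the two configurations. The "OpenImages" config keeps one narrative
# per example, including the word-level `timed_caption` (utterance plus start/end times)
# and the mouse `traces` (sequences of {x, y, t} points). The "OpenImages_captions" config
# keeps only the captions and annotator ids, grouped per image.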
_FEATURES = {
    "OpenImages": datasets.Features(
        {
            "image": datasets.Image(),
            "image_url": datasets.Value("string"),
            "dataset_id": datasets.Value("string"),
            "image_id": datasets.Value("string"),
            "annotator_id": datasets.Value("int32"),
            "caption": datasets.Value("string"),
            "timed_caption": datasets.Sequence(
                {
                    "utterance": datasets.Value("string"),
                    "start_time": datasets.Value("float32"),
                    "end_time": datasets.Value("float32"),
                }
            ),
            "traces": datasets.Sequence(
                datasets.Sequence(
                    {
                        "x": datasets.Value("float32"),
                        "y": datasets.Value("float32"),
                        "t": datasets.Value("float32"),
                    }
                )
            ),
            "voice_recording": datasets.Value("string"),
        }
    ),
    "OpenImages_captions": datasets.Features(
        {
            "image": datasets.Image(),
            "image_url": datasets.Value("string"),
            "dataset_id": datasets.Value("string"),
            "image_id": datasets.Value("string"),
            "annotator_ids": [datasets.Value("int32")],
            "captions": [datasets.Value("string")],
        }
    ),
}


class LocalizedNarrativesOpenImages(datasets.GeneratorBasedBuilder):
    """Builder for Localized Narratives."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="OpenImages",
            version=VERSION,
            description="OpenImages subset of Localized Narratives",
        ),
        datasets.BuilderConfig(
            name="OpenImages_captions",
            version=VERSION,
            description="OpenImages subset of Localized Narratives where captions are grouped per image (images can have multiple captions). For this subset, `timed_caption`, `traces` and `voice_recording` are not available.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "OpenImages"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES[self.config.name],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        annotation_files = dl_manager.download(_ANNOTATION_URLs)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"annotation_list": annotation_list, "split": split_name},
            )
            for split_name, annotation_list in annotation_files.items()
        ]

    def _generate_examples(self, annotation_list: list, split: str):
        if self.config.name == "OpenImages":
            return self._generate_examples_original_format(annotation_list, split)
        elif self.config.name == "OpenImages_captions":
            return self._generate_examples_aggregated_captions(annotation_list, split)

    def _generate_examples_original_format(self, annotation_list: list, split: str):
        counter = 0
        for annotation_file in annotation_list:
            with open(annotation_file, "r", encoding="utf-8") as fi:
                for line in fi:
                    annotation = json.loads(line)
                    # Open Images images are served from the public S3 bucket, keyed by split and image id.
                    image_url = f"https://s3.amazonaws.com/open-images-dataset/{split}/{annotation['image_id']}.jpg"
                    yield counter, {
                        "image": image_url,
                        "image_url": image_url,
                        "dataset_id": annotation["dataset_id"],
                        "image_id": annotation["image_id"],
                        "annotator_id": annotation["annotator_id"],
                        "caption": annotation["caption"],
                        "timed_caption": annotation["timed_caption"],
                        "traces": annotation["traces"],
                        "voice_recording": annotation["voice_recording"],
                    }
                    counter += 1

    def _generate_examples_aggregated_captions(self, annotation_list: list, split: str):
        # Group all narratives that refer to the same image into a single example.
        result = {}
        for annotation_file in annotation_list:
            with open(annotation_file, "r", encoding="utf-8") as fi:
                for line in fi:
                    annotation = json.loads(line)
                    image_url = f"https://s3.amazonaws.com/open-images-dataset/{split}/{annotation['image_id']}.jpg"
                    image_id = annotation["image_id"]
                    if image_id in result:
                        assert result[image_id]["dataset_id"] == annotation["dataset_id"]
                        assert result[image_id]["image_id"] == annotation["image_id"]
                        result[image_id]["annotator_ids"].append(annotation["annotator_id"])
                        result[image_id]["captions"].append(annotation["caption"])
                    else:
                        result[image_id] = {
                            "image": image_url,
                            "image_url": image_url,
                            "dataset_id": annotation["dataset_id"],
                            "image_id": image_id,
                            "annotator_ids": [annotation["annotator_id"]],
                            "captions": [annotation["caption"]],
                        }
        counter = 0
        for r in result.values():
            yield counter, r
            counter += 1
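

# A minimal usage sketch (assumptions: this file is saved locally as `localized_narratives.py`
# and the installed `datasets` version supports loading local dataset scripts):
#
#     from datasets import load_dataset
#
#     narratives = load_dataset("localized_narratives.py", "OpenImages", split="validation")
#     per_image = load_dataset("localized_narratives.py", "OpenImages_captions", split="validation")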