# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VIST: loading script for the Visual Storytelling dataset (DII and SIS annotations)."""
import json
import os

import datasets
from huggingface_hub import hf_hub_download

# Citation for the underlying dataset (Visual Storytelling, NAACL 2016).
_CITATION = """\
@inproceedings{huang2016visual,
    title = {Visual Storytelling},
    author = {Huang, Ting-Hao Kenneth and Ferraro, Francis and Mostafazadeh, Nasrin and Misra, Ishan and Agrawal, Aishwarya and Devlin, Jacob and Girshick, Ross and He, Xiaodong and Kohli, Pushmeet and Batra, Dhruv and Zitnick, C. Lawrence and Parikh, Devi and Vanderwende, Lucy and Galley, Michel and Mitchell, Margaret},
    booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)},
    year = {2016}
}
"""

_DESCRIPTION = """\
VIST (Visual Storytelling) pairs Flickr photo albums with crowd-sourced annotations,
including Descriptions of Images-in-Isolation (DII), where each photo is captioned on
its own, and Stories-in-Sequence (SIS), where ordered photo sequences are narrated as
short stories.
"""

_HOMEPAGE = "http://visionandlanguage.net/VIST/dataset.html"

# No machine-readable license string is provided upstream, so this field is left blank.
_LICENSE = ""
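
# Worker IDs observed in the DII annotations, fetched from the gated dataset repo and
# used below as the `worker_id` ClassLabel vocabulary; `use_auth_token=True` means a
# logged-in Hugging Face token is required even to import this script.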
_DII_WORKER_IDS = hf_hub_download(
    repo_type="dataset",
    repo_id="society-ethics/VIST",
    filename="data/dii.worker_ids.csv",
    use_auth_token=True,
)
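
# DII ships as one zipped JSONL archive per split; SIS is a single tarball that
# covers all three splits.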
_URLS = {
    "DII": {
        "train": "https://huggingface.co/datasets/NimaBoscarino/VIST/resolve/main/data/train.dii.jsonl.zip",
        "test": "https://huggingface.co/datasets/NimaBoscarino/VIST/resolve/main/data/test.dii.jsonl.zip",
        "val": "https://huggingface.co/datasets/NimaBoscarino/VIST/resolve/main/data/val.dii.jsonl.zip",
    },
    "SIS": "http://visionandlanguage.net/VIST/json_files/story-in-sequence/SIS-with-labels.tar.gz",
}


class VIST(datasets.GeneratorBasedBuilder):
    """VIST: Descriptions of Images-in-Isolation (DII) and Stories-in-Sequence (SIS) for Flickr photo albums."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="DII", version=VERSION, description="Descriptions of Images-in-Isolation: per-photo captions written without album context."),
        datasets.BuilderConfig(name="SIS", version=VERSION, description="Stories-in-Sequence: story sentences aligned to ordered photo sequences."),
    ]
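
    # Only the DII schema is spelled out below; with `features=None` (the SIS case),
    # `datasets` infers the schema from the first generated examples instead.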
    def _info(self):
        features = None
        if self.config.name == "DII":
            features = datasets.Features({
                'description': datasets.Value("string"),
                'title': datasets.Value("string"),
                'farm': datasets.ClassLabel(num_classes=10),  # Only 9 farms appear, but `datasets` rejects num_classes=9 (the raw values presumably run past the 0-8 range)
                'date_update': datasets.Value("timestamp[s]"),
                'primary': datasets.Value("int32"),
                'server': datasets.Value("int16"),
                'date_create': datasets.Value("timestamp[s]"),
                'photos': datasets.Value("int16"),
                'secret': datasets.Value("string"),
                'owner': datasets.Value("string"),
                'vist_label': datasets.Value("string"),
                'id': datasets.Value("int64"),
                "images": datasets.Sequence({
                    'datetaken': datasets.Value("date64"),
                    'license': datasets.ClassLabel(num_classes=7),
                    'image_title': datasets.Value("string"),
                    'longitude': datasets.Value("float64"),
                    'url': datasets.Image(decode=False),
                    'image_secret': datasets.Value("string"),
                    'media': datasets.ClassLabel(num_classes=2, names=["photo", "video"]),
                    'latitude': datasets.Value("float64"),
                    'image_id': datasets.Value("int64"),
                    'tags': [datasets.Value("string")],
                    'image_farm': datasets.ClassLabel(names=["1", "2", "6", "7"]),  # Values observed while exploring the data
                    'image_server': datasets.Value("int16"),
                    "annotations": datasets.Sequence({
                        'original_text': datasets.Value("string"),
                        'photo_order_in_story': datasets.Value("int8"),
                        'worker_id': datasets.ClassLabel(names_file=_DII_WORKER_IDS),
                        'text': datasets.Value("string"),
                    })
                })
            })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        if isinstance(urls, dict):
            # DII: one zipped JSONL archive per split. Each zip extracts to a
            # directory containing a single JSONL file.
            data_dirs = dl_manager.download_and_extract(urls)
            for split in data_dirs:
                archive_path = data_dirs[split]
                if os.path.isdir(archive_path):
                    data_dirs[split] = os.path.join(archive_path, os.listdir(archive_path)[0])
        else:
            # SIS: a single tarball covering all splits. The file names below follow
            # the layout of SIS-with-labels.tar.gz as documented on the VIST site;
            # adjust if the extracted layout differs.
            archive_path = dl_manager.download_and_extract(urls)
            data_dirs = {
                split: os.path.join(archive_path, "sis", f"{split}.story-in-sequence.json")
                for split in ("train", "val", "test")
            }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dirs["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dirs["val"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dirs["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Each DII file is JSON Lines: one album record (with nested images and
        # annotations) per line. The SIS files are single JSON documents, so this
        # per-line parse only applies cleanly to the DII configuration.
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, data
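

# A minimal usage sketch, not part of the loading logic: assuming this file is the
# loading script of the gated `society-ethics/VIST` dataset repo and the caller is
# authenticated, the DII config loads like any other Hub dataset.
if __name__ == "__main__":
    dii = datasets.load_dataset("society-ethics/VIST", name="DII", split="train", use_auth_token=True)
    print(dii[0]["title"])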