# Source: Hugging Face Hub file viewer residue for NimaBoscarino/LILA
# ("Update LILA.py", commit 05992ae) — kept as a comment so the file parses.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loading script for LILA (Labeled Information Library of Alexandria) camera-trap image datasets."""
import csv
import json
import os
import math
import requests
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
import pandas as pd
import datasets
# TODO: Replace this placeholder with the real BibTeX citation for LILA
# (find it on arxiv or on the dataset repo/website).
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Replace this placeholder with an official description of LILA.
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# NOTE: network I/O at import time — fetches the LILA index of collection
# names and their Azure SAS base URLs. Importing this module requires
# network access and will fail offline.
_LILA_SAS_URLS = pd.read_csv("https://lila.science/wp-content/uploads/2020/03/lila_sas_urls.txt")
# The index file's first column header is literally "# name"; normalize it
# so rows expose a clean `.name` attribute via itertuples().
_LILA_SAS_URLS.rename(columns={"# name": "name"}, inplace=True)
# Preprocessed JSON-lines metadata per collection, hosted on the Hub.
# Empty strings mark collections whose metadata has not been uploaded yet.
# How do I make these point to the particular commit ID?
_LILA_URLS = {
    "Caltech Camera Traps": "https://huggingface.co/datasets/NimaBoscarino/LILA/resolve/main/data/Caltech_Camera_Traps.jsonl",
    "ENA24": "https://huggingface.co/datasets/NimaBoscarino/LILA/resolve/main/data/ENA24.jsonl",
    "Missouri Camera Traps": "",
    "NACTI": "",
    "WCS Camera Traps": "",
    "Wellington Camera Traps": "",
    "Island Conservation Camera Traps": "",
    "Channel Islands Camera Traps": "",
    "Idaho Camera Traps": "",
    "Snapshot Serengeti": "",
    "Snapshot Karoo": "",
    "Snapshot Kgalagadi": "",
    "Snapshot Enonkishu": "",
    "Snapshot Camdeboo": "",
    "Snapshot Mountain Zebra": "",
    "Snapshot Kruger": "",
    "SWG Camera Traps": "",
    "Orinoquia Camera Traps": "",
}
# Config shown by default (e.g. in the Hub dataset viewer).
DEFAULT_CONFIG_NAME = "Caltech Camera Traps"
class LILAConfig(datasets.BuilderConfig):
    """BuilderConfig for a single LILA camera-trap collection.

    Args:
        image_base_url (str): Base (Azure SAS) URL under which the
            collection's images are hosted; joined with each example's
            ``file_name`` to form the full image URL.
        metadata_url (str): URL of the JSON-lines metadata file, one
            example per line. May be empty for collections whose
            metadata has not been preprocessed yet.
        **kwargs: Keyword arguments forwarded to ``datasets.BuilderConfig``
            (e.g. ``name``, ``description``).
    """

    def __init__(self, image_base_url, metadata_url, **kwargs):
        # Pin the per-config version; everything else goes to the base class.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.image_base_url = image_base_url
        self.metadata_url = metadata_url
class LILA(datasets.GeneratorBasedBuilder):
    """Builder exposing each LILA camera-trap collection as one config.

    Examples are the raw JSON-lines metadata records, augmented with an
    ``image`` field holding the full image URL (``Image(decode=False)``,
    so images are fetched lazily by consumers, not at build time).
    """

    VERSION = datasets.Version("1.1.0")

    # One config per row of the LILA SAS-URL index, keyed by collection name.
    BUILDER_CONFIGS = [
        LILAConfig(
            name=row.name,
            # description="TODO: Description",
            image_base_url=row.image_base_url,
            metadata_url=_LILA_URLS[row.name]
        ) for row in _LILA_SAS_URLS.itertuples()
    ]

    def _get_features(self) -> datasets.Features:
        """Return the feature schema for the active config.

        Raises:
            NotImplementedError: if no schema has been defined for the
                selected config yet.
        """
        # TODO: Use ClassLabel for categories...
        # TODO: Deal with 404s -> In my manual preprocessing, or in the datasets library?
        if self.config.name == 'Caltech Camera Traps':
            return datasets.Features({
                "id": datasets.Value("string"), "file_name": datasets.Value("string"),
                "width": datasets.Value("int32"), "height": datasets.Value("int32"),
                "seq_num_frames": datasets.Value("int32"),
                "date_captured": datasets.Value("date32"),
                "seq_id": datasets.Value("string"),
                "location": datasets.Value("string"),
                "rights_holder": datasets.Value("string"),
                "frame_num": datasets.Value("int32"),
                "annotations": datasets.Sequence({
                    "id": datasets.Value("string"),
                    "category_id": datasets.Value("int32"),
                }),
                "bboxes": datasets.Sequence({
                    "id": datasets.Value("string"),
                    "category_id": datasets.Value("int32"),
                    "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                }),
                "image": datasets.Image(decode=False),
            })
        elif self.config.name == 'ENA24':
            return datasets.Features({
                "id": datasets.Value("string"), "file_name": datasets.Value("string"),
                "width": datasets.Value("int32"), "height": datasets.Value("int32"),
                "annotations": datasets.Sequence({
                    "id": datasets.Value("string"),
                    "category_id": datasets.Value("int32"),
                    "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                }),
                "image": datasets.Image(decode=False),
            })
        # Previously this fell through and returned None, which surfaced
        # later as a confusing failure inside _info(); fail fast instead.
        raise NotImplementedError(
            f"No feature schema defined for config {self.config.name!r}"
        )

    def _info(self):
        """Assemble the DatasetInfo from the per-config feature schema."""
        features = self._get_features()
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Schemas differ between configs, hence the dispatch above.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment
            # the supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the JSON-lines metadata and expose a single train split."""
        archive_path = dl_manager.download_and_extract(self.config.metadata_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": archive_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from the JSON-lines metadata file.

        Each record is passed through as-is, plus an ``image`` field built
        from the config's image base URL and the record's ``file_name``.
        """
        # Metadata is JSON, so it is UTF-8 by definition; be explicit rather
        # than depending on the platform's default encoding.
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                example = json.loads(line)
                image_url = f"{self.config.image_base_url}/{example['file_name']}"
                yield example["id"], {
                    **example,
                    "image": image_url
                }