# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""IIIT5K dataset."""
import scipy.io
import datasets
import os
from pathlib import Path
_CITATION = """\
@InProceedings{MishraBMVC12,
author = "Mishra, A. and Alahari, K. and Jawahar, C.~V.",
title = "Scene Text Recognition using Higher Order Language Priors",
booktitle= "BMVC",
year = "2012"
}
"""
_DESCRIPTION = """\
The IIIT 5K-Word dataset is harvested from Google image search.
Query words like billboards, signboard, house numbers, house name plates, movie posters were used to collect images.
The dataset contains 5000 cropped word images from Scene Texts and born-digital images.
The dataset is divided into train and test parts.
This dataset can be used for large lexicon cropped word recognition.
We also provide a lexicon of more than 0.5 million dictionary words with this dataset.
"""
_HOMEPAGE = "http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K.html"
_DL_URL = "http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K-Word_V3.0.tar.gz"
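# A minimal usage sketch (assumptions: this script is saved locally as
# "IIIT-5K.py" and the download URL above is reachable; the call below is the
# standard `datasets` API, not anything specific to this script):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("./IIIT-5K.py", split="train")
#   example = ds[0]
#   example["image"]           # decoded word image
#   example["label"]           # ground-truth transcription
#   example["small_lexicon"]   # per-image candidate lexicon (50 words in the BMVC'12 setup)
#   example["medium_lexicon"]  # per-image candidate lexicon (1,000 words in the BMVC'12 setup)
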
class IIIT5K(datasets.GeneratorBasedBuilder):
"""IIIT-5K dataset."""

    def _info(self):
        # Each example pairs a cropped word image with its ground-truth
        # transcription and two per-image candidate lexicons.
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "label": datasets.Value("string"),
                "small_lexicon": datasets.Sequence(datasets.Value("string")),
                "medium_lexicon": datasets.Sequence(datasets.Value("string")),
            }
        )
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
        # The archive extracts to an `IIIT5K/` directory holding the images and
        # one MATLAB annotation file per split (traindata.mat / testdata.mat).
        archive_path = dl_manager.download_and_extract(_DL_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "archive_path": Path(archive_path) / "IIIT5K",
                    "info_path": Path(archive_path) / "IIIT5K" / "traindata.mat",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "archive_path": Path(archive_path) / "IIIT5K",
                    "info_path": Path(archive_path) / "IIIT5K" / "testdata.mat",
                },
            ),
        ]

    def _generate_examples(self, split, archive_path, info_path):
        # The .mat file holds a 1xN struct array named "traindata"/"testdata";
        # each entry packs, in order: image name, ground-truth word,
        # small lexicon, and medium lexicon.
        info = scipy.io.loadmat(info_path)
        info = info[split + "data"][0]
        for idx, info_ex in enumerate(info):
            # Image names in the annotations are relative to the archive root
            # (the "train/" or "test/" folder).
            path_image = os.path.join(archive_path, str(info_ex[0][0]))
            label = str(info_ex[1][0])
            small_lexicon = [str(lab[0]) for lab in info_ex[2][0]]
            medium_lexicon = [str(lab[0]) for lab in info_ex[3][0]]
            yield idx, {
                "image": path_image,
                "label": label,
                "small_lexicon": small_lexicon,
                "medium_lexicon": medium_lexicon,
            }