Datasets:
# coding=utf-8
# Copyright 2022 the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets
_CITATION = """\
@article{mahmoud2013khatt,
Author = {Sabri A. Mahmoud and Irfan Ahmad and Wasfi G. Al-Khatib and Mohammad Alshayeb and Mohammad Tanvir Parvez and Volker Märgner and Gernot A. Fink},
Title = { {KHATT: An Open Arabic Offline Handwritten Text Database} },
Journal = {Pattern Recognition},
Year = {2013},
doi = {10.1016/j.patcog.2013.08.009},
}
"""
_HOMEPAGE = "https://khatt.ideas2serve.net/KHATTAgreement.php"
_DESCRIPTION = """\
KHATT (KFUPM Handwritten Arabic TexT) is a database of unconstrained handwritten Arabic text written by 1000 different writers. The database was developed by a research group from KFUPM, Dhahran, Saudi Arabia, headed by Professor Sabri Mahmoud, in collaboration with Professor Fink from TU-Dortmund, Germany, and Dr. Märgner from TU-Braunschweig, Germany.
"""
_DATA_URL = {
"train": [
"https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/train.zip"
],
"validation": [
"https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/validation.zip"
],
}
class KHATT(datasets.GeneratorBasedBuilder):
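    """Builder for the KHATT offline handwritten Arabic text database (image/transcription pairs)."""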
VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
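                    # Each example pairs a handwritten page image with its ground-truth transcription.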
"image": datasets.Image(),
"text": datasets.Value("string"),
}
),
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
archives = dl_manager.download(_DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"archives": [dl_manager.iter_archive(archive) for archive in archives["validation"]],
"split": "validation",
},
),
]
    def _generate_examples(self, archives, split):
        """Yields examples."""
        idx = 0
        for archive in archives:
            # Each archive is expected to store a page image (.tif) immediately
            # followed by its transcription (.txt), so the image file handle is
            # kept until the matching text file is reached.
            for path, file in archive:
                # If we have an image
                if path.endswith(".tif"):
                    if split != "test":
                        img_file = file
                    else:
                        # A test split would ship without transcriptions, so the
                        # image is yielded on its own with an empty text field.
                        text = ""
                        ex = {"image": {"path": path, "bytes": file.read()}, "text": text}
                        yield idx, ex
                        idx += 1
                elif path.endswith(".txt"):
                    text = file.read().decode("utf-8")
                    ex = {"image": {"path": path, "bytes": img_file.read()}, "text": text}
                    yield idx, ex
                    idx += 1
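# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the loading script itself). It assumes the
# dataset is hosted on the Hugging Face Hub under "benhachem/KHATT", as the
# URLs in _DATA_URL suggest; recent versions of `datasets` may also require
# trust_remote_code=True to run a script-based dataset like this one.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    train = load_dataset("benhachem/KHATT", split="train")
    sample = train[0]
    print(sample["text"])        # ground-truth transcription (str)
    print(sample["image"].size)  # decoded PIL.Image of the handwritten text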