# coding=utf-8
# Copyright 2022 the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datasets


_CITATION = """\
@article{mahmoud2013khatt,
  author  = {Sabri A. Mahmoud and Irfan Ahmad and Wasfi G. Al-Khatib and Mohammad Alshayeb and Mohammad Tanvir Parvez and Volker Märgner and Gernot A. Fink},
  title   = {{KHATT: An Open Arabic Offline Handwritten Text Database}},
  journal = {Pattern Recognition},
  year    = {2013},
  doi     = {10.1016/j.patcog.2013.08.009},
}
"""

_HOMEPAGE = "https://khatt.ideas2serve.net/KHATTAgreement.php"

_DESCRIPTION = """\
KHATT (KFUPM Handwritten Arabic TexT) is a database of unconstrained handwritten
Arabic text written by 1000 different writers. The database was developed by a
research group from KFUPM, Dhahran, Saudi Arabia, headed by Professor Sabri Mahmoud,
in collaboration with Professor Fink from TU-Dortmund, Germany, and Dr. Märgner from
TU-Braunschweig, Germany.
"""

_DATA_URL = {
    "train": ["https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/train.zip"],
    "validation": ["https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/validation.zip"],
}


class KHATT(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "text": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archives = dl_manager.download(_DATA_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["validation"]],
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, archives, split):
        """Yields examples."""
        idx = 0
        for archive in archives:
            image_path, image_bytes = None, None
            for path, file in archive:
                # Each .tif image is expected to be followed by its matching .txt
                # transcription inside the archive. iter_archive yields file objects
                # that are only valid until the iterator advances, so the image
                # bytes must be read immediately rather than deferred.
                if path.endswith(".tif"):
                    image_path = path
                    image_bytes = file.read()
                elif path.endswith(".txt"):
                    text = file.read().decode("utf-8")
                    yield idx, {"image": {"path": image_path, "bytes": image_bytes}, "text": text}
                    idx += 1
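
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, guarded so it never runs when the datasets
# library imports this script): assuming this loading script is published on
# the Hub under the benhachem/KHATT repository referenced in _DATA_URL above,
# the two splits it defines can be loaded with the standard datasets API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ds = datasets.load_dataset("benhachem/KHATT")
    example = ds["train"][0]
    print(example["text"])        # ground-truth transcription string
    print(example["image"].size)  # PIL image decoded by the datasets.Image() feature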