# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets


# Citation for the dataset (see the dataset repo/website)
_CITATION = """\
@INPROCEEDINGS{10022652,
  author={Al-Fetyani, Mohammad and Al-Barham, Muhammad and Abandah, Gheith and Alsharkawi, Adham and Dawas, Maha},
  booktitle={2022 IEEE Spoken Language Technology Workshop (SLT)},
  title={MASC: Massive Arabic Speech Corpus},
  year={2023},
  volume={},
  number={},
  pages={1006-1013},
  doi={10.1109/SLT54892.2023.10022652}}
"""

_DESCRIPTION = """\
This dataset was collected from Twitter and contains more than 41 GB of clean Arabic tweet text,
with nearly 4 billion Arabic words (12 million unique Arabic words).
"""

_HOMEPAGE = "https://ieee-dataport.org/open-access/masc-massive-arabic-speech-corpus"

_LICENSE = "https://creativecommons.org/licenses/by/4.0/"

# The HuggingFace Datasets library doesn't host the data; it only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see `_split_generators` below).
# Note: raw files on the Hub are served from `/resolve/`, not `/blob/` (which returns an HTML page).
_URLS = {
    "train": "https://huggingface.co/datasets/pain/Arabic-Tweets/resolve/main/lm_twitter.txt",
}


class ArabicTweets(datasets.GeneratorBasedBuilder):
    """Arabic Tweets: more than 41 GB of clean Arabic tweet text collected from Twitter,
    with nearly 4 billion Arabic words (12 million unique Arabic words)."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            # Description shown on the dataset page
            description=_DESCRIPTION,
            # The dataset has a single text column
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["train"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to `_generate_examples`
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yields one example per line of the text file."""
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                # Strip the trailing newline; blank lines yield an empty string.
                yield idx, {"text": row.strip()}
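

# Usage sketch (an illustrative assumption, not part of the builder API): once this script is
# hosted in the "pain/Arabic-Tweets" repo referenced in `_URLS`, the dataset can be loaded with
# the standard `datasets.load_dataset` call. Streaming is used here only to avoid downloading
# the full ~41 GB file up front; the guard keeps this from running when the library imports
# the script.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("pain/Arabic-Tweets", split="train", streaming=True)
    print(next(iter(ds))["text"])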