pain committed on
Commit
7f2843a
1 Parent(s): ae2eb33

Create arabic-tweets.py

Files changed (1)
  1. arabic-tweets.py +77 -0
arabic-tweets.py ADDED
@@ -0,0 +1,77 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import datasets
+
+
+ _CITATION = """\
+ @INPROCEEDINGS{10022652,
+   author={Al-Fetyani, Mohammad and Al-Barham, Muhammad and Abandah, Gheith and Alsharkawi, Adham and Dawas, Maha},
+   booktitle={2022 IEEE Spoken Language Technology Workshop (SLT)},
+   title={MASC: Massive Arabic Speech Corpus},
+   year={2023},
+   volume={},
+   number={},
+   pages={1006-1013},
+   doi={10.1109/SLT54892.2023.10022652}}
+ """
+
+ _DESCRIPTION = """\
+ This dataset was collected from Twitter and contains more than 41 GB of clean Arabic tweet text, with nearly 4 billion Arabic words (12 million unique words).
+ """
+
+ _HOMEPAGE = "https://ieee-dataport.org/open-access/masc-massive-arabic-speech-corpus"
+
+ _LICENSE = "https://creativecommons.org/licenses/by/4.0/"
+
+ # The HuggingFace Datasets library doesn't host the dataset; this script only points to the original file.
+ # The raw text file must be fetched through a resolve/ URL; a blob/ URL returns the HTML page instead.
+ _URLS = {
+     "train": "https://huggingface.co/datasets/pain/Arabic-Tweets/resolve/main/lm_twitter.txt",
+ }
+
+
+ class ArabicTweets(datasets.GeneratorBasedBuilder):
+     """More than 41 GB of clean Arabic tweet text collected from Twitter, with nearly 4 billion Arabic words (12 million unique words)."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # The dataset has a single text column.
+             features=datasets.Features({"text": datasets.Value("string")}),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_path = dl_manager.download_and_extract(_URLS["train"])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples.
+                 gen_kwargs={"filepath": data_path, "split": "train"},
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         """Yields (key, example) pairs, one per line of the text file."""
+         with open(filepath, encoding="utf-8") as f:
+             for idx, row in enumerate(f):
+                 # Strip the trailing newline; blank lines yield empty strings.
+                 yield idx, {"text": row.strip()}
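
With the script in place, the dataset can be loaded by name. Below is a minimal usage sketch, not part of the commit: it assumes the script lives in the pain/Arabic-Tweets dataset repository referenced in _URLS, and uses streaming so the ~41 GB text file isn't downloaded up front. Recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# Stream examples instead of materializing the full ~41 GB file locally.
ds = load_dataset("pain/Arabic-Tweets", split="train", streaming=True)

# Each example is a dict with a single "text" field (one tweet per line).
for example in ds.take(3):
    print(example["text"])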