initial commit
- .gitattributes +9 -0
- README.md +379 -0
- arabic_billion_words.py +178 -0
- dataset/README.md +3 -0
- dataset/dataset/README.md +20 -0
- dataset/dataset/new_data_Alittihad_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Almasryalyoum_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Almustaqbal_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Alqabas_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Echoroukonline_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Ryiadh_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Sabanews_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_SaudiYoum_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Techreen_XML_utf_8.rar +3 -0
- dataset/dataset/new_data_Youm7_XML_utf_8.rar +3 -0
- dataset/new_data_Alittihad_XML_utf_8.rar +3 -0
- dataset/new_data_Almasryalyoum_XML_utf_8.rar +3 -0
- dataset/new_data_Almustaqbal_XML_utf_8.rar +3 -0
- dataset/new_data_Echoroukonline_XML_utf_8.rar +3 -0
- dataset/new_data_Sabanews_XML_utf_8.rar +3 -0
- dataset/new_data_SaudiYoum_XML_utf_8.rar +3 -0
- dataset/new_data_Techreen_XML_utf_8.rar +3 -0
- dataset_infos.json +1 -0
.gitattributes
CHANGED
@@ -53,3 +53,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+dataset/README.md filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_Alittihad_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_Almasryalyoum_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_Almustaqbal_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_Echoroukonline_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_Sabanews_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_SaudiYoum_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/new_data_Techreen_XML_utf_8.rar filter=lfs diff=lfs merge=lfs -text
+dataset/dataset filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,379 @@
---
annotations_creators:
- found
language_creators:
- found
language:
- ar
license:
- unknown
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
- 10K<n<100K
- 1M<n<10M
source_datasets:
- original
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
- masked-language-modeling
paperswithcode_id: null
pretty_name: Arabic Billion Words
dataset_info:
- config_name: Alittihad
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1601790302
    num_examples: 349342
  download_size: 348259999
  dataset_size: 1601790302
- config_name: Almasryalyoum
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1056197870
    num_examples: 291723
  download_size: 242604438
  dataset_size: 1056197870
- config_name: Almustaqbal
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1545659336
    num_examples: 446873
  download_size: 350826797
  dataset_size: 1545659336
- config_name: Alqabas
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 2631729746
    num_examples: 817274
  download_size: 595274646
  dataset_size: 2631729746
- config_name: Echoroukonline
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 464386206
    num_examples: 139732
  download_size: 108184378
  dataset_size: 464386206
- config_name: Ryiadh
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 3101294859
    num_examples: 858188
  download_size: 691264971
  dataset_size: 3101294859
- config_name: Sabanews
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 198019614
    num_examples: 92149
  download_size: 38214558
  dataset_size: 198019614
- config_name: SaudiYoum
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 2723291416
    num_examples: 888068
  download_size: 605537923
  dataset_size: 2723291416
- config_name: Techreen
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1103458209
    num_examples: 314597
  download_size: 252976781
  dataset_size: 1103458209
- config_name: Youm7
  features:
  - name: url
    dtype: string
  - name: head_line
    dtype: string
  - name: date
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 3004689464
    num_examples: 1172136
  download_size: 617708074
  dataset_size: 3004689464
config_names:
- Alittihad
- Almasryalyoum
- Almustaqbal
- Alqabas
- Echoroukonline
- Ryiadh
- Sabanews
- SaudiYoum
- Techreen
- Youm7
---

# Dataset Card for Arabic Billion Words Corpus

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** http://www.abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus
- **Repository:**
- **Paper:** https://arxiv.org/pdf/1611.04033
- **Leaderboard:**
- **Point of Contact:** [Ibrahim Abu El-Khair](mailto:iabuelkhair@gmail.com)

### Dataset Summary

The Abu El-Khair Corpus is an Arabic text corpus that includes more than five million newspaper articles.
It contains over a billion and a half words in total, of which about three million are unique.
The corpus is distributed in two encodings, UTF-8 and Windows CP-1256, and with two markup languages, SGML and XML.

**NB:** this dataset is based on the [unofficial copy](https://drive.google.com/drive/folders/1F2wCEfFHzJqX7eTuWhh-pGtrsaHPvTT8?usp=drive_link) ([discussion](https://huggingface.co/datasets/arabic_billion_words/discussions/3)) of the data and assumes it was downloaded properly. Put the `new_data_*` files into the `./dataset` folder like this:
```
[user@machine /path/to/dataset]$ tree
.
├── arabic_billion_words.py
├── dataset
│   ├── new_data_Alittihad_XML_utf_8.rar
│   ├── new_data_Almasryalyoum_XML_utf_8.rar
│   ├── new_data_Almustaqbal_XML_utf_8.rar
│   ├── new_data_Alqabas_XML_utf_8.rar
│   ├── new_data_Echoroukonline_XML_utf_8.rar
│   ├── new_data_Ryiadh_XML_utf_8.rar
│   ├── new_data_Sabanews_XML_utf_8.rar
│   ├── new_data_SaudiYoum_XML_utf_8.rar
│   ├── new_data_Techreen_XML_utf_8.rar
│   └── new_data_Youm7_XML_utf_8.rar
├── dataset_infos.json
├── README.md
└── usage_example.py
```
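
With the files in place, a configuration can be loaded from the local script. A minimal sketch (assuming the layout above; depending on your `datasets` version, script-based datasets may additionally require `trust_remote_code=True`):

```python
# Minimal loading sketch for one newspaper configuration.
from datasets import load_dataset

# "Sabanews" is the smallest configuration and the quickest to try.
ds = load_dataset("./arabic_billion_words.py", "Sabanews", split="train")
print(ds)                  # column names and number of rows
print(ds[0]["head_line"])  # headline of the first article
```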

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

Arabic

## Dataset Structure

### Data Instances

This is an example from the "Almasryalyoum" configuration subset:
```python
{
    "url": "http://today.almasryalyoum.com/printerfriendly.aspx?ArticleID=61300",
    "head_line": "رئيس وزراء المجر: عنصرية جماهير أوجبيست جلبت العار للبلاد",
    "date": "19/5/2007",
    "text": """قال متحدث باسم الحكومة المجرية: إن رئيس الوزراء فيرنك جيوركساني رحب بقرار اتحاد كرة القدم المجري بخصم ثلاث نقاط من نادي أوجبيست بسبب السلوك العنصري الذي صدر من جماهيره.
وعاقب الاتحاد المجري فريق أوجبيست بعد أن سخرت جماهيره من إبراهيم سيديبي مهاجم فريق ديبرينسين الأسود أثناء مباراة الفريقين أوائل مايو الجاري.
يذكر أن الاتحاد فرض أيضا غرامة مالية قدرها 20 ألف دولار علي أوجبيست في عام 2005 بعد أن رددت جماهيره شعارات معادية للسامية خلال مباراة بالدوري المجري.
وأوضح جيوركساني في خطاب إلي إيستفان كيستليكي رئيس الاتحاد المجري لكرة القدم، أن هذا السلوك العنصري من الجماهير «جلب العار لكرة القدم وللمجر». يذكر أن المجر بها مجموعة من مشجعي كرة القدم المشاغبين «الهوليجانز»، وشارك الكثير منهم في أعمال شغب معادية للحكومة في العام الماضي.""",
}
```

### Data Fields

The data fields are:
- "url": string, original URL of the article.
- "head_line": string, headline of the article.
- "date": string, date of the article.
- "text": string, text content of the article.

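As a quick check, the schema of a loaded split should show these four string fields (continuing the loading sketch above; the exact `repr` varies across `datasets` versions):

```python
# The features of every configuration are four plain string values.
print(ds.features)
# roughly: {'url': Value(dtype='string'), 'head_line': Value(dtype='string'),
#           'date': Value(dtype='string'), 'text': Value(dtype='string')}
```
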
### Data Splits

Each configuration subset has a single "train" split, containing the following number of examples:

|                | Number of examples |
|:---------------|-------------------:|
| Alittihad      |             349342 |
| Almasryalyoum  |             291723 |
| Almustaqbal    |             446873 |
| Alqabas        |             817274 |
| Echoroukonline |             139732 |
| Ryiadh         |             858188 |
| Sabanews       |              92149 |
| SaudiYoum      |             888068 |
| Techreen       |             314597 |
| Youm7          |            1172136 |

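These counts can be verified against a loaded split (a sketch, same assumptions as the loading example above):

```python
# Compare a configuration's train split size with the table above.
from datasets import load_dataset

for name in ["Sabanews", "Echoroukonline"]:
    ds = load_dataset("./arabic_billion_words.py", name, split="train")
    print(name, ds.num_rows)  # expected: Sabanews 92149, Echoroukonline 139732
```
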
## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

```
@article{el20161,
  title={1.5 billion words arabic corpus},
  author={El-Khair, Ibrahim Abu},
  journal={arXiv preprint arXiv:1611.04033},
  year={2016}
}
```

### Contributions

Thanks to [@zaidalyafeai](https://github.com/zaidalyafeai) and [@albertvillanova](https://github.com/albertvillanova) for adding this dataset.
arabic_billion_words.py
ADDED
@@ -0,0 +1,178 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arabic Billion Words Corpus"""


import os
import re
from pathlib import Path

import datasets


_CITATION = """\
@article{el20161,
  title={1.5 billion words arabic corpus},
  author={El-Khair, Ibrahim Abu},
  journal={arXiv preprint arXiv:1611.04033},
  year={2016}
}
"""

_DESCRIPTION = """\
THIS IS A FORK FOR LOCAL USAGE.
Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.
It contains over a billion and a half words in total, out of which, there are about three million unique words.
The corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.
Also it was marked with two mark-up languages, namely: SGML, and XML.
"""

_HOMEPAGE = "https://huggingface.co/datasets/arabic_billion_words"

# The archives are expected next to this script, under ./dataset.
# Plain string concatenation is used for the "new_data_" prefix: Path's "/"
# operator would insert a directory separator and produce the wrong file name.
here = Path(__file__).parent
_BASE_PATH = str(here / "dataset" / "new_data_")
DATASET_PATHS = {
    "Alittihad": _BASE_PATH + "Alittihad_XML_utf_8.rar",
    "Almasryalyoum": _BASE_PATH + "Almasryalyoum_XML_utf_8.rar",
    "Almustaqbal": _BASE_PATH + "Almustaqbal_XML_utf_8.rar",
    "Alqabas": _BASE_PATH + "Alqabas_XML_utf_8.rar",
    "Echoroukonline": _BASE_PATH + "Echoroukonline_XML_utf_8.rar",
    "Ryiadh": _BASE_PATH + "Ryiadh_XML_utf_8.rar",
    "Sabanews": _BASE_PATH + "Sabanews_XML_utf_8.rar",
    "SaudiYoum": _BASE_PATH + "SaudiYoum_XML_utf_8.rar",
    "Techreen": _BASE_PATH + "Techreen_XML_utf_8.rar",
    "Youm7": _BASE_PATH + "Youm7_XML_utf_8.rar",
}

# Some tags in the source XML are misspelled:
# - "Headline" sometimes appears as "Healine"
# - Misspelled article tags:
#   - Alqabas: <Alqabas>, <Alqabas1>
#   - Ryiadh: <Ryiadh>, <Ryiadh1>
MISSPELLED_TAGS = {
    "Dateline": ["Dateline", "dateline"],
    "Headline": ["Headline", "Healine"],
    "Text": ["Text"],
    "URL": ["URL"],
}

# One compiled pattern per accepted spelling of each tag.
TAG_PATTERNS = {
    tag: [re.compile(rf".*?<{label}>(.*?)</{label}>.*?", re.MULTILINE | re.DOTALL) for label in labels]
    for tag, labels in MISSPELLED_TAGS.items()
}


class ArabicBillionWords(datasets.GeneratorBasedBuilder):
    """Arabic Billion Words Corpus"""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Alittihad", version=VERSION, description="This part of the dataset covers the Alittihad newspaper"
        ),
        datasets.BuilderConfig(
            name="Almasryalyoum", version=VERSION, description="This part of the dataset covers the Almasryalyoum newspaper"
        ),
        datasets.BuilderConfig(
            name="Almustaqbal", version=VERSION, description="This part of the dataset covers the Almustaqbal newspaper"
        ),
        datasets.BuilderConfig(
            name="Alqabas", version=VERSION, description="This part of the dataset covers the Alqabas newspaper"
        ),
        datasets.BuilderConfig(
            name="Echoroukonline", version=VERSION, description="This part of the dataset covers the Echoroukonline newspaper"
        ),
        datasets.BuilderConfig(
            name="Ryiadh", version=VERSION, description="This part of the dataset covers the Ryiadh newspaper"
        ),
        datasets.BuilderConfig(
            name="Sabanews", version=VERSION, description="This part of the dataset covers the Sabanews newspaper"
        ),
        datasets.BuilderConfig(
            name="SaudiYoum", version=VERSION, description="This part of the dataset covers the SaudiYoum newspaper"
        ),
        datasets.BuilderConfig(
            name="Techreen", version=VERSION, description="This part of the dataset covers the Techreen newspaper"
        ),
        datasets.BuilderConfig(
            name="Youm7", version=VERSION, description="This part of the dataset covers the Youm7 newspaper"
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "url": datasets.Value("string"),
                "head_line": datasets.Value("string"),
                "date": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Extract the local .rar archive for the selected configuration.
        my_path = DATASET_PATHS[self.config.name]
        data_dir = dl_manager.extract(my_path)
        my_file_name = f"{self.config.name}_utf_8.xml"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, my_file_name),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        data_tag = self.config.name
        # Article tags may carry a suffix (e.g. <Alqabas1>), so the open and
        # close tags are matched by prefix only.
        pattern = re.compile(rf".*?<{data_tag}(.*?)</{data_tag}.*?", re.MULTILINE | re.DOTALL)
        key = 0
        lines = ""
        with open(filepath, mode="r", encoding="utf-8") as f:
            for line in f:
                lines += line
                if f"</{data_tag}" in line:
                    # A full article record has been accumulated; parse it.
                    match = pattern.match(lines)
                    lines = ""
                    if match:
                        record = match.group(1)
                        text = self._clean_text(self._extract_tag("Text", record))
                        url = self._extract_tag("URL", record)
                        head_line = self._clean_text(self._extract_tag("Headline", record))
                        date = self._extract_tag("Dateline", record)
                        yield key, {"url": url, "head_line": head_line, "date": date, "text": text}
                        key += 1

    @staticmethod
    def _extract_tag(tag, text):
        # Try each accepted (possibly misspelled) spelling of the tag.
        for pattern in TAG_PATTERNS[tag]:
            match = pattern.match(text)
            if match:
                return match.group(1)
        return ""

    @staticmethod
    def _clean_text(text):
        # Drop stray question-mark characters from the extracted text.
        return text.replace("?", "")
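
Note: the tag tables above drive `_extract_tag`; a small illustration with an invented record string (hypothetical data, shown only to demonstrate the misspelled-tag fallback):

```python
# <Healine> is the misspelled variant of <Headline> handled by TAG_PATTERNS.
record = "<URL>http://example.com/1</URL><Healine>Sample headline</Healine><Text>Sample body</Text>"

print(ArabicBillionWords._extract_tag("Headline", record))  # -> "Sample headline"
print(ArabicBillionWords._extract_tag("Dateline", record))  # -> "" (tag absent)
```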
dataset/README.md
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:273ea4163ca8bfb49c3a59c304bea5aa4b4a29c1b2d85e8286685035a7b5452a
size 1002
dataset/dataset/README.md
ADDED
@@ -0,0 +1,20 @@
This dataset is based on the [unofficial copy](https://drive.google.com/drive/folders/1F2wCEfFHzJqX7eTuWhh-pGtrsaHPvTT8?usp=drive_link) ([discussion](https://huggingface.co/datasets/arabic_billion_words/discussions/3)) of the data and assumes it was downloaded properly. Put the `new_data_*` files into the `./dataset` folder like this:
```
[user@machine /path/to/dataset]$ tree
.
├── arabic_billion_words.py
├── dataset
│   ├── new_data_Alittihad_XML_utf_8.rar
│   ├── new_data_Almasryalyoum_XML_utf_8.rar
│   ├── new_data_Almustaqbal_XML_utf_8.rar
│   ├── new_data_Alqabas_XML_utf_8.rar
│   ├── new_data_Echoroukonline_XML_utf_8.rar
│   ├── new_data_Ryiadh_XML_utf_8.rar
│   ├── new_data_Sabanews_XML_utf_8.rar
│   ├── new_data_SaudiYoum_XML_utf_8.rar
│   ├── new_data_Techreen_XML_utf_8.rar
│   └── new_data_Youm7_XML_utf_8.rar
├── dataset_infos.json
├── README.md
└── usage_example.py
```
dataset/dataset/new_data_Alittihad_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6dd90f7ca98699e924e0ea423dc9f4f648c645379f8bffe15eeb97af00fd6fc0
size 348259999
dataset/dataset/new_data_Almasryalyoum_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f88d24179fa97df8d179242cb564301be2c7a4ecd36a027815b8ce1563059e7a
size 242604438
dataset/dataset/new_data_Almustaqbal_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dff3361ad821f3bd3912cd7282db5c15a34919312b9bc7d708a8b30782c7fc36
size 350826797
dataset/dataset/new_data_Alqabas_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5ea70add534220a8caf8d230959f134f49a822ce3612adb4f1bb537dc3cc6b4
size 595274646
dataset/dataset/new_data_Echoroukonline_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f3e85bd99caeb9c5c4922edcd18720fc3700fd6751febfa7ee72e05a584a270
size 108184378
dataset/dataset/new_data_Ryiadh_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c934867e53cb57d45ff99a8b5cfa991ae255a1ecb20e79309a41af2aa3e45c15
size 691264971
dataset/dataset/new_data_Sabanews_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9b2f1ac8ed2a5e89ab9a6bcd82a0d825569b813b53cd83419968782e9946dbe
size 38214558
dataset/dataset/new_data_SaudiYoum_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d4cbb5554acb03fb7ce271a0b708c1bc6bcf31593ae8c670bed7f8c22335a915
size 605537923
dataset/dataset/new_data_Techreen_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e4ab520399069fd38d9d80f4429fc05efaae51a912e1467becfc2686e424770
size 252976781
dataset/dataset/new_data_Youm7_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd81aa0b3d74e5d9a07377369ea473d8a7bd51cb5826e9809d700de2ddeffe23
size 617708074
dataset/new_data_Alittihad_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6dd90f7ca98699e924e0ea423dc9f4f648c645379f8bffe15eeb97af00fd6fc0
size 348259999
dataset/new_data_Almasryalyoum_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f88d24179fa97df8d179242cb564301be2c7a4ecd36a027815b8ce1563059e7a
size 242604438
dataset/new_data_Almustaqbal_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dff3361ad821f3bd3912cd7282db5c15a34919312b9bc7d708a8b30782c7fc36
size 350826797
dataset/new_data_Echoroukonline_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f3e85bd99caeb9c5c4922edcd18720fc3700fd6751febfa7ee72e05a584a270
size 108184378
dataset/new_data_Sabanews_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9b2f1ac8ed2a5e89ab9a6bcd82a0d825569b813b53cd83419968782e9946dbe
size 38214558
dataset/new_data_SaudiYoum_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e190e23bf62507b620b618910484131d030f720e7f529f2be3e1e591618c1557
size 68947968
dataset/new_data_Techreen_XML_utf_8.rar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e4ab520399069fd38d9d80f4429fc05efaae51a912e1467becfc2686e424770
size 252976781
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"Alittihad": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Alittihad", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1601790302, "num_examples": 349342, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Alittihad_XML_utf_8.rar": {"num_bytes": 348259999, "checksum": "6dd90f7ca98699e924e0ea423dc9f4f648c645379f8bffe15eeb97af00fd6fc0"}}, "download_size": 348259999, "post_processing_size": null, "dataset_size": 1601790302, "size_in_bytes": 1950050301}, "Almasryalyoum": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Almasryalyoum", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1056197870, "num_examples": 291723, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Almasryalyoum_XML_utf_8.rar": {"num_bytes": 242604438, "checksum": "f88d24179fa97df8d179242cb564301be2c7a4ecd36a027815b8ce1563059e7a"}}, "download_size": 242604438, "post_processing_size": null, "dataset_size": 1056197870, "size_in_bytes": 1298802308}, "Almustaqbal": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was 
marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Almustaqbal", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1545659336, "num_examples": 446873, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Almustaqbal_XML_utf_8.rar": {"num_bytes": 350826797, "checksum": "dff3361ad821f3bd3912cd7282db5c15a34919312b9bc7d708a8b30782c7fc36"}}, "download_size": 350826797, "post_processing_size": null, "dataset_size": 1545659336, "size_in_bytes": 1896486133}, "Alqabas": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Alqabas", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2631729746, "num_examples": 817274, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Alqabas_XML_utf_8.rar": {"num_bytes": 595274646, "checksum": "e5ea70add534220a8caf8d230959f134f49a822ce3612adb4f1bb537dc3cc6b4"}}, "download_size": 595274646, "post_processing_size": null, "dataset_size": 2631729746, "size_in_bytes": 3227004392}, "Echoroukonline": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": 
"string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Echoroukonline", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 464386206, "num_examples": 139732, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Echoroukonline_XML_utf_8.rar": {"num_bytes": 108184378, "checksum": "8f3e85bd99caeb9c5c4922edcd18720fc3700fd6751febfa7ee72e05a584a270"}}, "download_size": 108184378, "post_processing_size": null, "dataset_size": 464386206, "size_in_bytes": 572570584}, "Ryiadh": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Ryiadh", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3101294859, "num_examples": 858188, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Ryiadh_XML_utf_8.rar": {"num_bytes": 691264971, "checksum": "c934867e53cb57d45ff99a8b5cfa991ae255a1ecb20e79309a41af2aa3e45c15"}}, "download_size": 691264971, "post_processing_size": null, "dataset_size": 3101294859, "size_in_bytes": 3792559830}, "Sabanews": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Sabanews", 
"version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 198019614, "num_examples": 92149, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Sabanews_XML_utf_8.rar": {"num_bytes": 38214558, "checksum": "c9b2f1ac8ed2a5e89ab9a6bcd82a0d825569b813b53cd83419968782e9946dbe"}}, "download_size": 38214558, "post_processing_size": null, "dataset_size": 198019614, "size_in_bytes": 236234172}, "SaudiYoum": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "SaudiYoum", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2723291416, "num_examples": 888068, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/SaudiYoum_XML_utf_8.rar": {"num_bytes": 605537923, "checksum": "d4cbb5554acb03fb7ce271a0b708c1bc6bcf31593ae8c670bed7f8c22335a915"}}, "download_size": 605537923, "post_processing_size": null, "dataset_size": 2723291416, "size_in_bytes": 3328829339}, "Techreen": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Techreen", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1103458209, "num_examples": 314597, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Techreen_XML_utf_8.rar": {"num_bytes": 252976781, "checksum": 
"5e4ab520399069fd38d9d80f4429fc05efaae51a912e1467becfc2686e424770"}}, "download_size": 252976781, "post_processing_size": null, "dataset_size": 1103458209, "size_in_bytes": 1356434990}, "Youm7": {"description": "Abu El-Khair Corpus is an Arabic text corpus, that includes more than five million newspaper articles.\nIt contains over a billion and a half words in total, out of which, there are about three million unique words.\nThe corpus is encoded with two types of encoding, namely: UTF-8, and Windows CP-1256.\nAlso it was marked with two mark-up languages, namely: SGML, and XML.\n", "citation": "@article{el20161,\n title={1.5 billion words arabic corpus},\n author={El-Khair, Ibrahim Abu},\n journal={arXiv preprint arXiv:1611.04033},\n year={2016}\n}\n", "homepage": "http://abuelkhair.net/index.php/en/arabic/abu-el-khair-corpus", "license": "", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "head_line": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "arabic_billion_words", "config_name": "Youm7", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3004689464, "num_examples": 1172136, "dataset_name": "arabic_billion_words"}}, "download_checksums": {"http://abuelkhair.net/corpus/Youm7_XML_utf_8.rar": {"num_bytes": 617708074, "checksum": "cd81aa0b3d74e5d9a07377369ea473d8a7bd51cb5826e9809d700de2ddeffe23"}}, "download_size": 617708074, "post_processing_size": null, "dataset_size": 3004689464, "size_in_bytes": 3622397538}}