File size: 14,307 Bytes
a2b1ada
38271c3
 
 
 
 
 
 
 
 
 
 
 
 
 
a2b1ada
46cdf1c
 
 
 
 
 
 
 
 
 
 
 
 
 
9d511f9
 
 
 
 
 
 
 
 
 
 
 
 
 
5f339c3
 
 
 
 
 
 
 
 
 
 
 
4c7524f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b7e3bb7
 
 
 
 
 
 
 
 
 
 
 
 
 
9873114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34b329a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38271c3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8ae059a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9a67e1a
 
 
 
 
 
 
 
 
 
e036b74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9873114
a2b1ada
 
 
 
 
 
 
 
 
38448d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b93864
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
02baf67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2b1ada
46cdf1c
 
 
 
9d511f9
 
 
 
5f339c3
 
 
 
4c7524f
 
 
 
b7e3bb7
 
 
 
9873114
 
 
 
34b329a
 
 
 
38271c3
 
 
 
8ae059a
 
 
 
9a67e1a
 
 
 
e036b74
 
 
 
a2b1ada
 
 
 
38448d5
 
 
 
9b93864
 
 
 
02baf67
 
 
 
a2b1ada
da0d760
a2b1ada
da0d760
 
 
 
 
 
 
 
 
 
b4507b7
3c048ee
7e41895
 
403036d
9372cf0
403036d
b4507b7
3c048ee
 
d8d715b
3c048ee
d8d715b
bc9041a
d8d715b
3c048ee
f845479
3c048ee
d8d715b
1941bef
d8d715b
3c048ee
b4507b7
 
da0d760
 
 
 
 
 
 
 
 
 
 
30e4cb0
db6d541
 
3c048ee
 
3046c0c
da0d760
 
46bcb2d
 
da0d760
 
 
78cb32c
 
 
2c649c4
da0d760
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
becb384
 
 
 
 
 
 
 
 
 
da0d760
 
 
 
 
becb384
da0d760
bc9041a
da0d760
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
---
language:
- ar
size_categories:
- 1B<n<10B
task_categories:
- text-classification
- question-answering
- translation
- summarization
- conversational
- text-generation
- text2text-generation
- fill-mask
pretty_name: Mixed Arabic Datasets (MAD) Corpus
dataset_info:
- config_name: Ara--Abdelaziz--MNAD.v1
  features:
  - name: Title
    dtype: string
  - name: Body
    dtype: string
  - name: Category
    dtype: string
  splits:
  - name: train
    num_bytes: 1101921980
    num_examples: 418563
  download_size: 526103216
  dataset_size: 1101921980
- config_name: Ara--Abdelaziz--QuranExe
  features:
  - name: text
    dtype: string
  - name: resource_name
    dtype: string
  - name: verses_keys
    dtype: string
  splits:
  - name: train
    num_bytes: 133108687
    num_examples: 49888
  download_size: 58769326
  dataset_size: 133108687
- config_name: Ara--Abdelaziz--tweet_sentiment_multilingual
  features:
  - name: text
    dtype: string
  - name: label
    dtype: int64
  splits:
  - name: train
    num_bytes: 306108
    num_examples: 1839
  download_size: 172509
  dataset_size: 306108
- config_name: Ara--Ali-C137--Hindawi-Books-dataset
  features:
  - name: BookLink
    dtype: string
  - name: BookName
    dtype: string
  - name: AuthorName
    dtype: string
  - name: AboutBook
    dtype: string
  - name: ChapterLink
    dtype: string
  - name: ChapterName
    dtype: string
  - name: ChapterText
    dtype: string
  - name: AboutAuthor
    dtype: string
  splits:
  - name: train
    num_bytes: 1364854259
    num_examples: 49821
  download_size: 494678002
  dataset_size: 1364854259
- config_name: Ara--Goud--Goud-sum
  features:
  - name: article
    dtype: string
  - name: headline
    dtype: string
  - name: categories
    dtype: string
  splits:
  - name: train
    num_bytes: 288296544
    num_examples: 139288
  download_size: 147735776
  dataset_size: 288296544
- config_name: Ara--MBZUAI--Bactrian-X
  features:
  - name: instruction
    dtype: string
  - name: input
    dtype: string
  - name: id
    dtype: string
  - name: output
    dtype: string
  splits:
  - name: train
    num_bytes: 66093524
    num_examples: 67017
  download_size: 33063779
  dataset_size: 66093524
- config_name: Ara--OpenAssistant--oasst1
  features:
  - name: message_id
    dtype: string
  - name: parent_id
    dtype: string
  - name: user_id
    dtype: string
  - name: created_date
    dtype: string
  - name: text
    dtype: string
  - name: role
    dtype: string
  - name: lang
    dtype: string
  - name: review_count
    dtype: int32
  - name: review_result
    dtype: bool
  - name: deleted
    dtype: bool
  - name: rank
    dtype: float64
  - name: synthetic
    dtype: bool
  - name: model_name
    dtype: 'null'
  - name: detoxify
    dtype: 'null'
  - name: message_tree_id
    dtype: string
  - name: tree_state
    dtype: string
  - name: emojis
    struct:
    - name: count
      sequence: int32
    - name: name
      sequence: string
  - name: labels
    struct:
    - name: count
      sequence: int32
    - name: name
      sequence: string
    - name: value
      sequence: float64
  - name: __index_level_0__
    dtype: int64
  splits:
  - name: train
    num_bytes: 58168
    num_examples: 56
  download_size: 30984
  dataset_size: 58168
- config_name: Ara--Wikipedia
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 3052201469
    num_examples: 1205403
  download_size: 1316212231
  dataset_size: 3052201469
- config_name: Ara--miracl--miracl
  features:
  - name: query_id
    dtype: string
  - name: query
    dtype: string
  - name: positive_passages
    list:
    - name: docid
      dtype: string
    - name: text
      dtype: string
    - name: title
      dtype: string
  - name: negative_passages
    list:
    - name: docid
      dtype: string
    - name: text
      dtype: string
    - name: title
      dtype: string
  splits:
  - name: train
    num_bytes: 32012083
    num_examples: 3495
  download_size: 15798509
  dataset_size: 32012083
- config_name: Ara--pain--Arabic-Tweets
  features:
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 41639770853
    num_examples: 202700438
  download_size: 22561651700
  dataset_size: 41639770853
- config_name: Ara--saudinewsnet
  features:
  - name: source
    dtype: string
  - name: url
    dtype: string
  - name: date_extracted
    dtype: string
  - name: title
    dtype: string
  - name: author
    dtype: string
  - name: content
    dtype: string
  splits:
  - name: train
    num_bytes: 103654009
    num_examples: 31030
  download_size: 49117164
  dataset_size: 103654009
- config_name: Ary--AbderrahmanSkiredj1--Darija-Wikipedia
  features:
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 8104410
    num_examples: 4862
  download_size: 3229966
  dataset_size: 8104410
- config_name: Ary--Ali-C137--Darija-Stories-Dataset
  features:
  - name: ChapterName
    dtype: string
  - name: ChapterLink
    dtype: string
  - name: Author
    dtype: string
  - name: Text
    dtype: string
  - name: Tags
    dtype: int64
  splits:
  - name: train
    num_bytes: 476926644
    num_examples: 6142
  download_size: 241528641
  dataset_size: 476926644
- config_name: Ary--Wikipedia
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 10007364
    num_examples: 6703
  download_size: 4094377
  dataset_size: 10007364
- config_name: Arz--Wikipedia
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1364641408
    num_examples: 1617770
  download_size: 306420318
  dataset_size: 1364641408
configs:
- config_name: Ara--Abdelaziz--MNAD.v1
  data_files:
  - split: train
    path: Ara--Abdelaziz--MNAD.v1/train-*
- config_name: Ara--Abdelaziz--QuranExe
  data_files:
  - split: train
    path: Ara--Abdelaziz--QuranExe/train-*
- config_name: Ara--Abdelaziz--tweet_sentiment_multilingual
  data_files:
  - split: train
    path: Ara--Abdelaziz--tweet_sentiment_multilingual/train-*
- config_name: Ara--Ali-C137--Hindawi-Books-dataset
  data_files:
  - split: train
    path: Ara--Ali-C137--Hindawi-Books-dataset/train-*
- config_name: Ara--Goud--Goud-sum
  data_files:
  - split: train
    path: Ara--Goud--Goud-sum/train-*
- config_name: Ara--MBZUAI--Bactrian-X
  data_files:
  - split: train
    path: Ara--MBZUAI--Bactrian-X/train-*
- config_name: Ara--OpenAssistant--oasst1
  data_files:
  - split: train
    path: Ara--OpenAssistant--oasst1/train-*
- config_name: Ara--Wikipedia
  data_files:
  - split: train
    path: Ara--Wikipedia/train-*
- config_name: Ara--miracl--miracl
  data_files:
  - split: train
    path: Ara--miracl--miracl/train-*
- config_name: Ara--pain--Arabic-Tweets
  data_files:
  - split: train
    path: Ara--pain--Arabic-Tweets/train-*
- config_name: Ara--saudinewsnet
  data_files:
  - split: train
    path: Ara--saudinewsnet/train-*
- config_name: Ary--AbderrahmanSkiredj1--Darija-Wikipedia
  data_files:
  - split: train
    path: Ary--AbderrahmanSkiredj1--Darija-Wikipedia/train-*
- config_name: Ary--Ali-C137--Darija-Stories-Dataset
  data_files:
  - split: train
    path: Ary--Ali-C137--Darija-Stories-Dataset/train-*
- config_name: Ary--Wikipedia
  data_files:
  - split: train
    path: Ary--Wikipedia/train-*
- config_name: Arz--Wikipedia
  data_files:
  - split: train
    path: Arz--Wikipedia/train-*
---
# Dataset Card for "Mixed Arabic Datasets (MAD) Corpus"

**The Mixed Arabic Datasets Corpus : A Community-Driven Collection of Diverse Arabic Texts**

## Dataset Description

The Mixed Arabic Datasets (MAD) presents a dynamic compilation of diverse Arabic texts sourced from various online platforms and datasets. It addresses a critical challenge faced by researchers, linguists, and language enthusiasts: the fragmentation of Arabic language datasets across the Internet. With MAD, we are trying to centralize these dispersed resources into a single, comprehensive repository.

Encompassing a wide spectrum of content, ranging from social media conversations to literary masterpieces, MAD captures the rich tapestry of Arabic communication, including both standard Arabic and regional dialects.

This corpus offers comprehensive insights into the linguistic diversity and cultural nuances of Arabic expression.

## Usage 

To use this dataset, pick one of the available configs:

`Ara--MBZUAI--Bactrian-X` | `Ara--OpenAssistant--oasst1` | `Ary--AbderrahmanSkiredj1--Darija-Wikipedia`

`Ara--Wikipedia` | `Ary--Wikipedia` | `Arz--Wikipedia`

`Ary--Ali-C137--Darija-Stories-Dataset` | `Ara--Ali-C137--Hindawi-Books-dataset` | `Ara--Goud--Goud-sum`

`Ara--Abdelaziz--MNAD.v1` | `Ara--Abdelaziz--QuranExe` | `Ara--Abdelaziz--tweet_sentiment_multilingual`

`Ara--miracl--miracl` | `Ara--pain--Arabic-Tweets` | `Ara--saudinewsnet`

Example of usage:

```python
dataset = load_dataset('M-A-D/Mixed-Arabic-Datasets-Repo', 'Ara--MBZUAI--Bactrian-X')
```

If you load multiple datasets and want to merge them together, you can simply leverage `concatenate_datasets()` from `datasets`:

```python
dataset3 = concatenate_datasets([dataset1['train'], dataset2['train']])
```

Note: process the datasets before merging, to make sure the resulting dataset is consistent.

## Dataset Size

The Mixed Arabic Datasets (MAD) is a dynamic and evolving collection, with its size fluctuating as new datasets are added or removed. As MAD continuously expands, it becomes a living resource that adapts to the ever-changing landscape of Arabic language datasets.

**Dataset List**

MAD draws from a diverse array of sources, each contributing to its richness and breadth. The collection is constantly evolving; the list below tracks datasets already integrated into MAD (✔) and those planned for the near future:

- [✔] OpenAssistant/oasst1 (ar portion) : [Dataset Link](https://huggingface.co/datasets/OpenAssistant/oasst1)
- [✔] MBZUAI/Bactrian-X (ar portion) : [Dataset Link](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ar/train)
- [✔] AbderrahmanSkiredj1/Darija-Wikipedia : [Dataset Link](https://huggingface.co/datasets/AbderrahmanSkiredj1/moroccan_darija_wikipedia_dataset)
- [✔] Arabic Wikipedia : [Dataset Link](https://huggingface.co/datasets/wikipedia)
- [✔] Moroccan Arabic Wikipedia : [Dataset Link](https://huggingface.co/datasets/wikipedia)
- [✔] Egyptian Arabic Wikipedia : [Dataset Link](https://huggingface.co/datasets/wikipedia)
- [✔] Darija Stories Dataset : [Dataset Link](https://huggingface.co/datasets/Ali-C137/Darija-Stories-Dataset)
- [✔] Hindawi Books Dataset : [Dataset Link](https://huggingface.co/datasets/Ali-C137/Hindawi-Books-dataset)
- [ ] uonlp/CulturaX - ar : [Dataset Link](https://huggingface.co/datasets/uonlp/CulturaX/viewer/ar/train)
- [ ] Pain/ArabicTweets : [Dataset Link](https://huggingface.co/datasets/pain/Arabic-Tweets)
- [ ] Abu-El-Khair Corpus : [Dataset Link](https://huggingface.co/datasets/arabic_billion_words)
- [✔] QuranExe : [Dataset Link](https://huggingface.co/datasets/mustapha/QuranExe)
- [✔] MNAD : [Dataset Link](https://huggingface.co/datasets/J-Mourad/MNAD.v1)
- [ ] IADD : [Dataset Link](https://raw.githubusercontent.com/JihadZa/IADD/main/IADD.json)
- [ ] OSIAN : [Dataset Link](https://wortschatz.uni-leipzig.de/en/download/Arabic#ara-tn_newscrawl-OSIAN_2018)
- [ ] MAC corpus : [Dataset Link](https://raw.githubusercontent.com/LeMGarouani/MAC/main/MAC%20corpus.csv)
- [✔] Goud.ma-Sum : [Dataset Link](https://huggingface.co/datasets/Goud/Goud-sum)
- [✔] SaudiNewsNet : [Dataset Link](https://huggingface.co/datasets/saudinewsnet)
- [✔] Miracl : [Dataset Link](https://huggingface.co/datasets/miracl/miracl)
- [✔] CardiffNLP/TweetSentimentMulti : [Dataset Link](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual)
- [ ] OSCAR-2301 : [Dataset Link](https://huggingface.co/datasets/oscar-corpus/OSCAR-2301/viewer/ar/train)
- [ ] mc4 : [Dataset Link](https://huggingface.co/datasets/mc4/viewer/ar/train)
- [ ] Muennighoff/xP3x : [Dataset Link](https://huggingface.co/datasets/Muennighoff/xP3x)
- [ ] Ai_Society : [Dataset Link](https://huggingface.co/datasets/camel-ai/ai_society_translated)

## Potential Use Cases

The Mixed Arabic Datasets (MAD) holds the potential to catalyze a multitude of groundbreaking applications:

- **Linguistic Analysis:** Employ MAD to conduct in-depth linguistic studies, exploring dialectal variances, language evolution, and grammatical structures.
- **Topic Modeling:** Dive into diverse themes and subjects through the extensive collection, revealing insights into emerging trends and prevalent topics.
- **Sentiment Understanding:** Decode sentiments spanning Arabic dialects, revealing cultural nuances and emotional dynamics.
- **Sociocultural Research:** Embark on a sociolinguistic journey, unraveling the intricate connection between language, culture, and societal shifts.

## Dataset Access

MAD's access mechanism is unique: while it doesn't carry a general license itself, each constituent dataset within the corpus retains its individual license. By accessing the dataset details through the provided links in the "Dataset List" section above, users can understand the specific licensing terms for each dataset.

### Join Us on Discord

For discussions, contributions, and community interactions, join us on Discord! [![Discord](https://img.shields.io/discord/798499298231726101?label=Join%20us%20on%20Discord&logo=discord&logoColor=white&style=for-the-badge)](https://discord.gg/2NpJ9JGm)

### How to Contribute

Want to contribute to the Mixed Arabic Datasets project? Follow our comprehensive guide on Google Colab for step-by-step instructions: [Contribution Guide](https://colab.research.google.com/drive/1kOIRoicgCOV8TPvASAI_2uMY7rpXnqzJ?usp=sharing).

**Note**: If you'd like to test a contribution before submitting it, feel free to do so on the [MAD Test Dataset](https://huggingface.co/datasets/M-A-D/Mixed-Arabic-Dataset-test).

## Citation

```
@dataset{mad_corpus_2023,
title = {Mixed Arabic Datasets (MAD)},
author = {MAD Community},
howpublished = {Dataset},
url = {https://huggingface.co/datasets/M-A-D/Mixed-Arabic-Datasets-Repo},
year = {2023},
}
```