|
--- |
|
language: |
|
- ar |
|
size_categories: |
|
- 1B<n<10B |
|
task_categories: |
|
- text-classification |
|
- question-answering |
|
- translation |
|
- summarization |
|
- conversational |
|
- text-generation |
|
- text2text-generation |
|
- fill-mask |
|
pretty_name: Mixed Arabic Datasets (MAD) Corpus |
|
dataset_info: |
|
- config_name: Ara--Abdelaziz--MNAD.v1 |
|
features: |
|
- name: Title |
|
dtype: string |
|
- name: Body |
|
dtype: string |
|
- name: Category |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1101921980 |
|
num_examples: 418563 |
|
download_size: 526103216 |
|
dataset_size: 1101921980 |
|
- config_name: Ara--Abdelaziz--QuranExe |
|
features: |
|
- name: text |
|
dtype: string |
|
- name: resource_name |
|
dtype: string |
|
- name: verses_keys |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 133108687 |
|
num_examples: 49888 |
|
download_size: 58769326 |
|
dataset_size: 133108687 |
|
- config_name: Ara--Abdelaziz--tweet_sentiment_multilingual |
|
features: |
|
- name: text |
|
dtype: string |
|
- name: label |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 306108 |
|
num_examples: 1839 |
|
download_size: 172509 |
|
dataset_size: 306108 |
|
- config_name: Ara--Ali-C137--Hindawi-Books-dataset |
|
features: |
|
- name: BookLink |
|
dtype: string |
|
- name: BookName |
|
dtype: string |
|
- name: AuthorName |
|
dtype: string |
|
- name: AboutBook |
|
dtype: string |
|
- name: ChapterLink |
|
dtype: string |
|
- name: ChapterName |
|
dtype: string |
|
- name: ChapterText |
|
dtype: string |
|
- name: AboutAuthor |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1364854259 |
|
num_examples: 49821 |
|
download_size: 494678002 |
|
dataset_size: 1364854259 |
|
- config_name: Ara--MBZUAI--Bactrian-X |
|
features: |
|
- name: instruction |
|
dtype: string |
|
- name: input |
|
dtype: string |
|
- name: id |
|
dtype: string |
|
- name: output |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 66093524 |
|
num_examples: 67017 |
|
download_size: 33063779 |
|
dataset_size: 66093524 |
|
- config_name: Ara--OpenAssistant--oasst1 |
|
features: |
|
- name: message_id |
|
dtype: string |
|
- name: parent_id |
|
dtype: string |
|
- name: user_id |
|
dtype: string |
|
- name: created_date |
|
dtype: string |
|
- name: text |
|
dtype: string |
|
- name: role |
|
dtype: string |
|
- name: lang |
|
dtype: string |
|
- name: review_count |
|
dtype: int32 |
|
- name: review_result |
|
dtype: bool |
|
- name: deleted |
|
dtype: bool |
|
- name: rank |
|
dtype: float64 |
|
- name: synthetic |
|
dtype: bool |
|
- name: model_name |
|
dtype: 'null' |
|
- name: detoxify |
|
dtype: 'null' |
|
- name: message_tree_id |
|
dtype: string |
|
- name: tree_state |
|
dtype: string |
|
- name: emojis |
|
struct: |
|
- name: count |
|
sequence: int32 |
|
- name: name |
|
sequence: string |
|
- name: labels |
|
struct: |
|
- name: count |
|
sequence: int32 |
|
- name: name |
|
sequence: string |
|
- name: value |
|
sequence: float64 |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 58168 |
|
num_examples: 56 |
|
download_size: 30984 |
|
dataset_size: 58168 |
|
- config_name: Ara--Wikipedia |
|
features: |
|
- name: id |
|
dtype: string |
|
- name: url |
|
dtype: string |
|
- name: title |
|
dtype: string |
|
- name: text |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 3052201469 |
|
num_examples: 1205403 |
|
download_size: 1316212231 |
|
dataset_size: 3052201469 |
|
- config_name: Ary--AbderrahmanSkiredj1--Darija-Wikipedia |
|
features: |
|
- name: text |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 8104410 |
|
num_examples: 4862 |
|
download_size: 3229966 |
|
dataset_size: 8104410 |
|
- config_name: Ary--Ali-C137--Darija-Stories-Dataset |
|
features: |
|
- name: ChapterName |
|
dtype: string |
|
- name: ChapterLink |
|
dtype: string |
|
- name: Author |
|
dtype: string |
|
- name: Text |
|
dtype: string |
|
- name: Tags |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 476926644 |
|
num_examples: 6142 |
|
download_size: 241528641 |
|
dataset_size: 476926644 |
|
- config_name: Ary--Wikipedia |
|
features: |
|
- name: id |
|
dtype: string |
|
- name: url |
|
dtype: string |
|
- name: title |
|
dtype: string |
|
- name: text |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 10007364 |
|
num_examples: 6703 |
|
download_size: 4094377 |
|
dataset_size: 10007364 |
|
- config_name: Arz--Wikipedia |
|
features: |
|
- name: id |
|
dtype: string |
|
- name: url |
|
dtype: string |
|
- name: title |
|
dtype: string |
|
- name: text |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1364641408 |
|
num_examples: 1617770 |
|
download_size: 306420318 |
|
dataset_size: 1364641408 |
|
configs: |
|
- config_name: Ara--Abdelaziz--MNAD.v1 |
|
data_files: |
|
- split: train |
|
path: Ara--Abdelaziz--MNAD.v1/train-* |
|
- config_name: Ara--Abdelaziz--QuranExe |
|
data_files: |
|
- split: train |
|
path: Ara--Abdelaziz--QuranExe/train-* |
|
- config_name: Ara--Abdelaziz--tweet_sentiment_multilingual |
|
data_files: |
|
- split: train |
|
path: Ara--Abdelaziz--tweet_sentiment_multilingual/train-* |
|
- config_name: Ara--Ali-C137--Hindawi-Books-dataset |
|
data_files: |
|
- split: train |
|
path: Ara--Ali-C137--Hindawi-Books-dataset/train-* |
|
- config_name: Ara--MBZUAI--Bactrian-X |
|
data_files: |
|
- split: train |
|
path: Ara--MBZUAI--Bactrian-X/train-* |
|
- config_name: Ara--OpenAssistant--oasst1 |
|
data_files: |
|
- split: train |
|
path: Ara--OpenAssistant--oasst1/train-* |
|
- config_name: Ara--Wikipedia |
|
data_files: |
|
- split: train |
|
path: Ara--Wikipedia/train-* |
|
- config_name: Ary--AbderrahmanSkiredj1--Darija-Wikipedia |
|
data_files: |
|
- split: train |
|
path: Ary--AbderrahmanSkiredj1--Darija-Wikipedia/train-* |
|
- config_name: Ary--Ali-C137--Darija-Stories-Dataset |
|
data_files: |
|
- split: train |
|
path: Ary--Ali-C137--Darija-Stories-Dataset/train-* |
|
- config_name: Ary--Wikipedia |
|
data_files: |
|
- split: train |
|
path: Ary--Wikipedia/train-* |
|
- config_name: Arz--Wikipedia |
|
data_files: |
|
- split: train |
|
path: Arz--Wikipedia/train-* |
|
--- |
|
# Dataset Card for "Mixed Arabic Datasets (MAD) Corpus" |
|
|
|
**The Mixed Arabic Datasets Corpus: A Community-Driven Collection of Diverse Arabic Texts**
|
|
|
## Dataset Description |
|
|
|
The Mixed Arabic Datasets (MAD) Corpus is a dynamic compilation of diverse Arabic texts sourced from various online platforms and datasets. It addresses a persistent challenge for researchers, linguists, and language enthusiasts: the fragmentation of Arabic language datasets across the Internet. MAD aims to centralize these dispersed resources into a single, comprehensive repository.
|
|
|
Spanning content from social media conversations to literary works, MAD captures the breadth of Arabic communication in both Modern Standard Arabic and regional dialects.
|
|
|
The corpus thus offers insight into the linguistic diversity and cultural nuances of Arabic expression.
|
|
|
## Usage |
|
|
|
To use this dataset, pick one of the available configs:
|
|
|
`Ara--Abdelaziz--MNAD.v1` | `Ara--Abdelaziz--QuranExe` | `Ara--Abdelaziz--tweet_sentiment_multilingual`

`Ara--Ali-C137--Hindawi-Books-dataset` | `Ara--MBZUAI--Bactrian-X` | `Ara--OpenAssistant--oasst1`

`Ara--Wikipedia` | `Ary--AbderrahmanSkiredj1--Darija-Wikipedia` | `Ary--Ali-C137--Darija-Stories-Dataset`

`Ary--Wikipedia` | `Arz--Wikipedia`
|
|
|
Example of usage: |
|
|
|
```python |
|
from datasets import load_dataset

dataset = load_dataset('M-A-D/Mixed-Arabic-Datasets-Repo', 'Ara--MBZUAI--Bactrian-X')
|
``` |
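`load_dataset` returns a `DatasetDict`; every config in this repo currently ships a single `train` split (see the `dataset_info` metadata above), so you can inspect records directly:

```python
# Assumes `dataset` from the snippet above (Ara--MBZUAI--Bactrian-X config).
print(dataset)               # available splits and features
print(dataset['train'][0])   # first record: instruction, input, id, output
```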
|
|
|
If you load multiple configs and want to merge them, you can leverage `concatenate_datasets()` from `datasets`:
|
|
|
```python
from datasets import concatenate_datasets

dataset3 = concatenate_datasets([dataset1['train'], dataset2['train']])
|
``` |
|
|
|
Note: process each dataset before merging so that the resulting dataset has a consistent schema; the configs use different column names.
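For example, here is a minimal sketch that reduces two configs to a shared single-column schema before concatenating. The column names (`text`, `ChapterText`) come from the `dataset_info` metadata above:

```python
from datasets import load_dataset, concatenate_datasets

repo = 'M-A-D/Mixed-Arabic-Datasets-Repo'
wiki = load_dataset(repo, 'Ara--Wikipedia')['train']
books = load_dataset(repo, 'Ara--Ali-C137--Hindawi-Books-dataset')['train']

# Keep only a shared 'text' column so the schemas line up.
wiki = wiki.select_columns(['text'])
books = books.rename_column('ChapterText', 'text').select_columns(['text'])

merged = concatenate_datasets([wiki, books])
```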
|
|
|
## Dataset Size |
|
|
|
The Mixed Arabic Datasets (MAD) Corpus is a dynamic, evolving collection: its size changes as datasets are added or removed. Treat it as a living resource rather than a fixed snapshot of the Arabic dataset landscape.
|
|
|
**Dataset List** |
|
|
|
MAD draws from a diverse array of sources, each contributing to its richness and breadth. The collection is constantly evolving: checked items below are already integrated, and unchecked items are planned additions.
|
|
|
- [x] OpenAssistant/oasst1 (ar portion): [Dataset Link](https://huggingface.co/datasets/OpenAssistant/oasst1)
- [x] MBZUAI/Bactrian-X (ar portion): [Dataset Link](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ar/train)
- [x] AbderrahmanSkiredj1/Darija-Wikipedia: [Dataset Link](https://huggingface.co/datasets/AbderrahmanSkiredj1/moroccan_darija_wikipedia_dataset)
- [x] Arabic Wikipedia: [Dataset Link](https://huggingface.co/datasets/wikipedia)
- [x] Moroccan Arabic Wikipedia: [Dataset Link](https://huggingface.co/datasets/wikipedia)
- [x] Egyptian Arabic Wikipedia: [Dataset Link](https://huggingface.co/datasets/wikipedia)
- [x] Darija Stories Dataset: [Dataset Link](https://huggingface.co/datasets/Ali-C137/Darija-Stories-Dataset)
- [x] Hindawi Books Dataset: [Dataset Link](https://huggingface.co/datasets/Ali-C137/Hindawi-Books-dataset)
- [ ] uonlp/CulturaX - ar: [Dataset Link](https://huggingface.co/datasets/uonlp/CulturaX/viewer/ar/train)
- [ ] Pain/ArabicTweets: [Dataset Link](https://huggingface.co/datasets/pain/Arabic-Tweets)
- [ ] Abu-El-Khair Corpus: [Dataset Link](https://huggingface.co/datasets/arabic_billion_words)
- [x] QuranExe: [Dataset Link](https://huggingface.co/datasets/mustapha/QuranExe)
- [x] MNAD: [Dataset Link](https://huggingface.co/datasets/J-Mourad/MNAD.v1)
- [ ] IADD: [Dataset Link](https://raw.githubusercontent.com/JihadZa/IADD/main/IADD.json)
- [ ] OSIAN: [Dataset Link](https://wortschatz.uni-leipzig.de/en/download/Arabic#ara-tn_newscrawl-OSIAN_2018)
- [ ] MAC corpus: [Dataset Link](https://raw.githubusercontent.com/LeMGarouani/MAC/main/MAC%20corpus.csv)
- [ ] Goud.ma-Sum: [Dataset Link](https://huggingface.co/datasets/Goud/Goud-sum)
- [ ] SaudiNewsNet: [Dataset Link](https://huggingface.co/datasets/saudinewsnet)
- [ ] Miracl: [Dataset Link](https://huggingface.co/datasets/miracl/miracl)
- [x] CardiffNLP/TweetSentimentMulti: [Dataset Link](https://huggingface.co/datasets/cardiffnlp/tweet_sentiment_multilingual)
- [ ] OSCAR-2301: [Dataset Link](https://huggingface.co/datasets/oscar-corpus/OSCAR-2301/viewer/ar/train)
- [ ] mc4: [Dataset Link](https://huggingface.co/datasets/mc4/viewer/ar/train)
- [ ] Muennighoff/xP3x: [Dataset Link](https://huggingface.co/datasets/Muennighoff/xP3x)
- [ ] Ai_Society: [Dataset Link](https://huggingface.co/datasets/camel-ai/ai_society_translated)
|
|
|
## Potential Use Cases |
|
|
|
The Mixed Arabic Datasets (MAD) Corpus supports a wide range of applications:
|
|
|
- **Linguistic Analysis:** Conduct in-depth linguistic studies of dialectal variation, language change, and grammatical structure.
- **Topic Modeling:** Explore the themes and subjects covered across the collection to surface prevalent and emerging topics.
- **Sentiment Understanding:** Analyze sentiment across Arabic dialects and the cultural nuances it carries (see the sketch below).
- **Sociocultural Research:** Study the relationship between language, culture, and societal change in Arabic-speaking communities.
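As a concrete starting point for sentiment work, the sketch below loads the tweet sentiment config listed above and counts its integer labels. The label scheme is inherited from the upstream cardiffnlp dataset (assumed to be negative/neutral/positive), so verify it against the source before relying on it:

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset('M-A-D/Mixed-Arabic-Datasets-Repo',
                  'Ara--Abdelaziz--tweet_sentiment_multilingual')['train']

# Label distribution; the integer scheme is assumed to match the upstream
# cardiffnlp/tweet_sentiment_multilingual dataset.
print(Counter(ds['label']))
```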
|
|
|
## Dataset Access |
|
|
|
MAD does not carry a blanket license; each constituent dataset within the corpus retains its individual license. Follow the links in the "Dataset List" section above to review the specific licensing terms of each dataset.
|
|
|
### Join Us on Discord |
|
|
|
For discussions, contributions, and community interactions, join us on Discord! [![Discord](https://img.shields.io/discord/798499298231726101?label=Join%20us%20on%20Discord&logo=discord&logoColor=white&style=for-the-badge)](https://discord.gg/2NpJ9JGm) |
|
|
|
### How to Contribute |
|
|
|
Want to contribute to the Mixed Arabic Datasets project? Follow our comprehensive guide on Google Colab for step-by-step instructions: [Contribution Guide](https://colab.research.google.com/drive/1kOIRoicgCOV8TPvASAI_2uMY7rpXnqzJ?usp=sharing). |
|
|
|
**Note**: If you'd like to test a contribution before submitting it, feel free to do so on the [MAD Test Dataset](https://huggingface.co/datasets/M-A-D/Mixed-Arabic-Dataset-test). |
|
|
|
## Citation |
|
|
|
``` |
|
@dataset{mad_corpus_2023,
|
title = {Mixed Arabic Datasets (MAD)}, |
|
author = {MAD Community}, |
|
howpublished = {Dataset}, |
|
url = {https://huggingface.co/datasets/M-A-D/Mixed-Arabic-Datasets-Repo}, |
|
year = {2023}, |
|
} |
|
``` |