Ali-C137's picture
Update README.md
78cb32c
|
raw
history blame
14 kB
metadata
language:
  - ar
size_categories:
  - 1B<n<10B
task_categories:
  - text-classification
  - question-answering
  - translation
  - summarization
  - conversational
  - text-generation
  - text2text-generation
  - fill-mask
pretty_name: Mixed Arabic Datasets (MAD) Corpus
dataset_info:
  - config_name: Ara--Abdelaziz--MNAD.v1
    features:
      - name: Title
        dtype: string
      - name: Body
        dtype: string
      - name: Category
        dtype: string
    splits:
      - name: train
        num_bytes: 1101921980
        num_examples: 418563
    download_size: 526103216
    dataset_size: 1101921980
  - config_name: Ara--Abdelaziz--QuranExe
    features:
      - name: text
        dtype: string
      - name: resource_name
        dtype: string
      - name: verses_keys
        dtype: string
    splits:
      - name: train
        num_bytes: 133108687
        num_examples: 49888
    download_size: 58769326
    dataset_size: 133108687
  - config_name: Ara--Abdelaziz--tweet_sentiment_multilingual
    features:
      - name: text
        dtype: string
      - name: label
        dtype: int64
    splits:
      - name: train
        num_bytes: 306108
        num_examples: 1839
    download_size: 172509
    dataset_size: 306108
  - config_name: Ara--Ali-C137--Hindawi-Books-dataset
    features:
      - name: BookLink
        dtype: string
      - name: BookName
        dtype: string
      - name: AuthorName
        dtype: string
      - name: AboutBook
        dtype: string
      - name: ChapterLink
        dtype: string
      - name: ChapterName
        dtype: string
      - name: ChapterText
        dtype: string
      - name: AboutAuthor
        dtype: string
    splits:
      - name: train
        num_bytes: 1364854259
        num_examples: 49821
    download_size: 494678002
    dataset_size: 1364854259
  - config_name: Ara--Goud--Goud-sum
    features:
      - name: article
        dtype: string
      - name: headline
        dtype: string
      - name: categories
        dtype: string
    splits:
      - name: train
        num_bytes: 288296544
        num_examples: 139288
    download_size: 147735776
    dataset_size: 288296544
  - config_name: Ara--MBZUAI--Bactrian-X
    features:
      - name: instruction
        dtype: string
      - name: input
        dtype: string
      - name: id
        dtype: string
      - name: output
        dtype: string
    splits:
      - name: train
        num_bytes: 66093524
        num_examples: 67017
    download_size: 33063779
    dataset_size: 66093524
  - config_name: Ara--OpenAssistant--oasst1
    features:
      - name: message_id
        dtype: string
      - name: parent_id
        dtype: string
      - name: user_id
        dtype: string
      - name: created_date
        dtype: string
      - name: text
        dtype: string
      - name: role
        dtype: string
      - name: lang
        dtype: string
      - name: review_count
        dtype: int32
      - name: review_result
        dtype: bool
      - name: deleted
        dtype: bool
      - name: rank
        dtype: float64
      - name: synthetic
        dtype: bool
      - name: model_name
        dtype: 'null'
      - name: detoxify
        dtype: 'null'
      - name: message_tree_id
        dtype: string
      - name: tree_state
        dtype: string
      - name: emojis
        struct:
          - name: count
            sequence: int32
          - name: name
            sequence: string
      - name: labels
        struct:
          - name: count
            sequence: int32
          - name: name
            sequence: string
          - name: value
            sequence: float64
      - name: __index_level_0__
        dtype: int64
    splits:
      - name: train
        num_bytes: 58168
        num_examples: 56
    download_size: 30984
    dataset_size: 58168
  - config_name: Ara--Wikipedia
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 3052201469
        num_examples: 1205403
    download_size: 1316212231
    dataset_size: 3052201469
  - config_name: Ara--miracl--miracl
    features:
      - name: query_id
        dtype: string
      - name: query
        dtype: string
      - name: positive_passages
        list:
          - name: docid
            dtype: string
          - name: text
            dtype: string
          - name: title
            dtype: string
      - name: negative_passages
        list:
          - name: docid
            dtype: string
          - name: text
            dtype: string
          - name: title
            dtype: string
    splits:
      - name: train
        num_bytes: 32012083
        num_examples: 3495
    download_size: 15798509
    dataset_size: 32012083
  - config_name: Ara--saudinewsnet
    features:
      - name: source
        dtype: string
      - name: url
        dtype: string
      - name: date_extracted
        dtype: string
      - name: title
        dtype: string
      - name: author
        dtype: string
      - name: content
        dtype: string
    splits:
      - name: train
        num_bytes: 103654009
        num_examples: 31030
    download_size: 49117164
    dataset_size: 103654009
  - config_name: Ary--AbderrahmanSkiredj1--Darija-Wikipedia
    features:
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 8104410
        num_examples: 4862
    download_size: 3229966
    dataset_size: 8104410
  - config_name: Ary--Ali-C137--Darija-Stories-Dataset
    features:
      - name: ChapterName
        dtype: string
      - name: ChapterLink
        dtype: string
      - name: Author
        dtype: string
      - name: Text
        dtype: string
      - name: Tags
        dtype: int64
    splits:
      - name: train
        num_bytes: 476926644
        num_examples: 6142
    download_size: 241528641
    dataset_size: 476926644
  - config_name: Ary--Wikipedia
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 10007364
        num_examples: 6703
    download_size: 4094377
    dataset_size: 10007364
  - config_name: Arz--Wikipedia
    features:
      - name: id
        dtype: string
      - name: url
        dtype: string
      - name: title
        dtype: string
      - name: text
        dtype: string
    splits:
      - name: train
        num_bytes: 1364641408
        num_examples: 1617770
    download_size: 306420318
    dataset_size: 1364641408
configs:
  - config_name: Ara--Abdelaziz--MNAD.v1
    data_files:
      - split: train
        path: Ara--Abdelaziz--MNAD.v1/train-*
  - config_name: Ara--Abdelaziz--QuranExe
    data_files:
      - split: train
        path: Ara--Abdelaziz--QuranExe/train-*
  - config_name: Ara--Abdelaziz--tweet_sentiment_multilingual
    data_files:
      - split: train
        path: Ara--Abdelaziz--tweet_sentiment_multilingual/train-*
  - config_name: Ara--Ali-C137--Hindawi-Books-dataset
    data_files:
      - split: train
        path: Ara--Ali-C137--Hindawi-Books-dataset/train-*
  - config_name: Ara--Goud--Goud-sum
    data_files:
      - split: train
        path: Ara--Goud--Goud-sum/train-*
  - config_name: Ara--MBZUAI--Bactrian-X
    data_files:
      - split: train
        path: Ara--MBZUAI--Bactrian-X/train-*
  - config_name: Ara--OpenAssistant--oasst1
    data_files:
      - split: train
        path: Ara--OpenAssistant--oasst1/train-*
  - config_name: Ara--Wikipedia
    data_files:
      - split: train
        path: Ara--Wikipedia/train-*
  - config_name: Ara--miracl--miracl
    data_files:
      - split: train
        path: Ara--miracl--miracl/train-*
  - config_name: Ara--saudinewsnet
    data_files:
      - split: train
        path: Ara--saudinewsnet/train-*
  - config_name: Ary--AbderrahmanSkiredj1--Darija-Wikipedia
    data_files:
      - split: train
        path: Ary--AbderrahmanSkiredj1--Darija-Wikipedia/train-*
  - config_name: Ary--Ali-C137--Darija-Stories-Dataset
    data_files:
      - split: train
        path: Ary--Ali-C137--Darija-Stories-Dataset/train-*
  - config_name: Ary--Wikipedia
    data_files:
      - split: train
        path: Ary--Wikipedia/train-*
  - config_name: Arz--Wikipedia
    data_files:
      - split: train
        path: Arz--Wikipedia/train-*

Dataset Card for "Mixed Arabic Datasets (MAD) Corpus"

The Mixed Arabic Datasets Corpus: A Community-Driven Collection of Diverse Arabic Texts

Dataset Description

The Mixed Arabic Datasets (MAD) presents a dynamic compilation of diverse Arabic texts sourced from various online platforms and datasets. It addresses a critical challenge faced by researchers, linguists, and language enthusiasts: the fragmentation of Arabic language datasets across the Internet. With MAD, we are trying to centralize these dispersed resources into a single, comprehensive repository.

Encompassing a wide spectrum of content, ranging from social media conversations to literary masterpieces, MAD captures the rich tapestry of Arabic communication, including both standard Arabic and regional dialects.

This corpus offers comprehensive insights into the linguistic diversity and cultural nuances of Arabic expression.

Usage

To use this dataset, pick one of the available configs:

Ara--MBZUAI--Bactrian-X | Ara--OpenAssistant--oasst1 | Ary--AbderrahmanSkiredj1--Darija-Wikipedia

Ara--Wikipedia | Ary--Wikipedia | Arz--Wikipedia

Ary--Ali-C137--Darija-Stories-Dataset | Ara--Ali-C137--Hindawi-Books-dataset

Example of usage:

dataset = load_dataset('M-A-D/Mixed-Arabic-Datasets-Repo', 'Ara--MBZUAI--Bactrian-X')

If you load multiple datasets and want to merge them together, you can simply leverage concatenate_datasets() from datasets

dataset3 = concatenate_datasets([dataset1['train'], dataset2['train']])

Note: process the datasets before merging in order to make sure the resulting dataset is consistent

Dataset Size

The Mixed Arabic Datasets (MAD) is a dynamic and evolving collection, with its size fluctuating as new datasets are added or removed. As MAD continuously expands, it becomes a living resource that adapts to the ever-changing landscape of Arabic language datasets.

Dataset List

MAD draws from a diverse array of sources, each contributing to its richness and breadth. While the collection is constantly evolving, some of the datasets that are poised to join MAD in the near future include:

Potential Use Cases

The Mixed Arabic Datasets (MAD) holds the potential to catalyze a multitude of groundbreaking applications:

  • Linguistic Analysis: Employ MAD to conduct in-depth linguistic studies, exploring dialectal variances, language evolution, and grammatical structures.
  • Topic Modeling: Dive into diverse themes and subjects through the extensive collection, revealing insights into emerging trends and prevalent topics.
  • Sentiment Understanding: Decode sentiments spanning Arabic dialects, revealing cultural nuances and emotional dynamics.
  • Sociocultural Research: Embark on a sociolinguistic journey, unraveling the intricate connection between language, culture, and societal shifts.

Dataset Access

MAD's access mechanism is unique: while it doesn't carry a general license itself, each constituent dataset within the corpus retains its individual license. By accessing the dataset details through the provided links in the "Dataset List" section above, users can understand the specific licensing terms for each dataset.

Join Us on Discord

For discussions, contributions, and community interactions, join us on Discord!

How to Contribute

Want to contribute to the Mixed Arabic Datasets project? Follow our comprehensive guide on Google Colab for step-by-step instructions: Contribution Guide.

Note: If you'd like to test a contribution before submitting it, feel free to do so on the MAD Test Dataset.

Citation

@dataset{mad_corpus_2023,
  title = {Mixed Arabic Datasets (MAD)},
  author = {MAD Community},
  howpublished = {Dataset},
  url = {https://huggingface.co/datasets/M-A-D/Mixed-Arabic-Datasets-Repo},
  year = {2023},
}