|
--- |
|
language: |
|
- ar |
|
license: apache-2.0 |
|
size_categories: |
|
- 1M<n<10M |
|
task_categories: |
|
- text-classification |
|
- translation |
|
- summarization |
|
pretty_name: 2A |
|
dataset_info: |
|
- config_name: CohereForAI-aya_collection-aya_dataset |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: string |
|
- name: language_code |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 7555482 |
|
num_examples: 13960 |
|
download_size: 3687445 |
|
dataset_size: 7555482 |
|
- config_name: CohereForAI-aya_collection-aya_human_annotated |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: test |
|
num_bytes: 222650 |
|
num_examples: 250 |
|
download_size: 120393 |
|
dataset_size: 222650 |
|
- config_name: CohereForAI-aya_collection-templated_afrisenti |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 5070578 |
|
num_examples: 14468 |
|
- name: test |
|
num_bytes: 2674428 |
|
num_examples: 7838 |
|
- name: validation |
|
num_bytes: 643036 |
|
num_examples: 1816 |
|
download_size: 2330165 |
|
dataset_size: 8388042 |
|
- config_name: CohereForAI-aya_collection-templated_mintaka |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 20413129 |
|
num_examples: 70000 |
|
- name: test |
|
num_bytes: 5799667 |
|
num_examples: 20000 |
|
- name: validation |
|
num_bytes: 2976183 |
|
num_examples: 10000 |
|
download_size: 6746433 |
|
dataset_size: 29188979 |
|
- config_name: CohereForAI-aya_collection-templated_ntx_llm |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 199809 |
|
num_examples: 111 |
|
download_size: 34306 |
|
dataset_size: 199809 |
|
- config_name: CohereForAI-aya_collection-templated_xcsqa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: validation |
|
num_bytes: 393580 |
|
num_examples: 1000 |
|
download_size: 137233 |
|
dataset_size: 393580 |
|
- config_name: CohereForAI-aya_collection-templated_xlel_wd |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 97691354 |
|
num_examples: 90760 |
|
- name: test |
|
num_bytes: 15499274 |
|
num_examples: 14791 |
|
- name: validation |
|
num_bytes: 10752041 |
|
num_examples: 9768 |
|
download_size: 57959575 |
|
dataset_size: 123942669 |
|
- config_name: CohereForAI-aya_collection-translated_adversarial_qa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 147727007 |
|
num_examples: 100000 |
|
- name: test |
|
num_bytes: 16108000 |
|
num_examples: 10000 |
|
- name: validation |
|
num_bytes: 14862183 |
|
num_examples: 10000 |
|
download_size: 52642775 |
|
dataset_size: 178697190 |
|
- config_name: CohereForAI-aya_collection-translated_cnn_dailymail |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 3578924407 |
|
num_examples: 1000000 |
|
- name: test |
|
num_bytes: 415594340 |
|
num_examples: 114900 |
|
- name: validation |
|
num_bytes: 486698663 |
|
num_examples: 133680 |
|
download_size: 2209523190 |
|
dataset_size: 4481217410 |
|
- config_name: CohereForAI-aya_collection-translated_dolly |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: gcp_source |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: alphabet |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 213140804 |
|
num_examples: 148080 |
|
download_size: 96189154 |
|
dataset_size: 213140804 |
|
- config_name: CohereForAI-aya_collection-translated_flan_coqa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 245744048 |
|
num_examples: 64090 |
|
download_size: 124335769 |
|
dataset_size: 245744048 |
|
- config_name: CohereForAI-aya_collection-translated_flan_gem_wiki |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 961863533.277311 |
|
num_examples: 271470 |
|
download_size: 485152798 |
|
dataset_size: 961863533.277311 |
|
- config_name: CohereForAI-aya_collection-translated_flan_qa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 2989244 |
|
num_examples: 5400 |
|
download_size: 1292664 |
|
dataset_size: 2989244 |
|
- config_name: CohereForAI-aya_collection-translated_joke_explaination |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 8219049 |
|
num_examples: 7540 |
|
download_size: 3600136 |
|
dataset_size: 8219049 |
|
- config_name: CohereForAI-aya_collection-translated_mintaka |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 40908047 |
|
num_examples: 140000 |
|
- name: test |
|
num_bytes: 11646781 |
|
num_examples: 40000 |
|
- name: validation |
|
num_bytes: 5951801 |
|
num_examples: 20000 |
|
download_size: 12723211 |
|
dataset_size: 58506629 |
|
- config_name: CohereForAI-aya_collection-translated_mlqa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: test |
|
num_bytes: 331062576 |
|
num_examples: 231800 |
|
- name: validation |
|
num_bytes: 31900260 |
|
num_examples: 22960 |
|
download_size: 146571384 |
|
dataset_size: 362962836 |
|
- config_name: CohereForAI-aya_collection-translated_nqopen |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 397677612 |
|
num_examples: 1758500 |
|
- name: validation |
|
num_bytes: 16780970 |
|
num_examples: 72200 |
|
download_size: 136208663 |
|
dataset_size: 414458582 |
|
- config_name: CohereForAI-aya_collection-translated_paws |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 303643575 |
|
num_examples: 494010 |
|
- name: test |
|
num_bytes: 49242541 |
|
num_examples: 80000 |
|
- name: validation |
|
num_bytes: 49475307 |
|
num_examples: 80000 |
|
download_size: 66436419 |
|
dataset_size: 402361423 |
|
- config_name: CohereForAI-aya_collection-translated_piqa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 113290227 |
|
num_examples: 161130 |
|
- name: validation |
|
num_bytes: 12924744 |
|
num_examples: 18380 |
|
download_size: 45954644 |
|
dataset_size: 126214971 |
|
- config_name: CohereForAI-aya_collection-translated_wikiqa |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: dataset_name |
|
dtype: string |
|
- name: sub_dataset_name |
|
dtype: string |
|
- name: task_type |
|
dtype: string |
|
- name: template_id |
|
dtype: int64 |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: split |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 5014300 |
|
num_examples: 10400 |
|
- name: test |
|
num_bytes: 1378807 |
|
num_examples: 2930 |
|
- name: validation |
|
num_bytes: 685770 |
|
num_examples: 1400 |
|
download_size: 2872586 |
|
dataset_size: 7078877 |
|
- config_name: CohereForAI-aya_dataset |
|
features: |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: language |
|
dtype: string |
|
- name: language_code |
|
dtype: string |
|
- name: annotation_type |
|
dtype: string |
|
- name: user_id |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 8314232 |
|
num_examples: 13960 |
|
- name: test |
|
num_bytes: 246400 |
|
num_examples: 250 |
|
download_size: 3778631 |
|
dataset_size: 8560632 |
|
- config_name: CohereForAI-aya_evaluation_suite-aya_human_annotated |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: test |
|
num_bytes: 222650 |
|
num_examples: 250 |
|
download_size: 120393 |
|
dataset_size: 222650 |
|
- config_name: CohereForAI-aya_evaluation_suite-dolly_human_edited |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: source_id |
|
dtype: int64 |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: test |
|
num_bytes: 188495 |
|
num_examples: 200 |
|
download_size: 100291 |
|
dataset_size: 188495 |
|
- config_name: CohereForAI-aya_evaluation_suite-dolly_machine_translated |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: inputs |
|
dtype: string |
|
- name: targets |
|
dtype: string |
|
- name: language |
|
dtype: string |
|
- name: script |
|
dtype: string |
|
- name: source_id |
|
dtype: int64 |
|
- name: __index_level_0__ |
|
dtype: int64 |
|
splits: |
|
- name: test |
|
num_bytes: 3491803 |
|
num_examples: 2000 |
|
download_size: 1762303 |
|
dataset_size: 3491803 |
|
configs: |
|
- config_name: CohereForAI-aya_collection-aya_dataset |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-aya_dataset/train-* |
|
- config_name: CohereForAI-aya_collection-aya_human_annotated |
|
data_files: |
|
- split: test |
|
path: CohereForAI-aya_collection-aya_human_annotated/test-* |
|
- config_name: CohereForAI-aya_collection-templated_afrisenti |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-templated_afrisenti/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-templated_afrisenti/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-templated_afrisenti/validation-* |
|
- config_name: CohereForAI-aya_collection-templated_mintaka |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-templated_mintaka/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-templated_mintaka/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-templated_mintaka/validation-* |
|
- config_name: CohereForAI-aya_collection-templated_ntx_llm |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-templated_ntx_llm/train-* |
|
- config_name: CohereForAI-aya_collection-templated_xcsqa |
|
data_files: |
|
- split: validation |
|
path: CohereForAI-aya_collection-templated_xcsqa/validation-* |
|
- config_name: CohereForAI-aya_collection-templated_xlel_wd |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-templated_xlel_wd/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-templated_xlel_wd/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-templated_xlel_wd/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_adversarial_qa |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_adversarial_qa/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-translated_adversarial_qa/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_adversarial_qa/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_cnn_dailymail |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_cnn_dailymail/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-translated_cnn_dailymail/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_cnn_dailymail/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_dolly |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_dolly/train-* |
|
- config_name: CohereForAI-aya_collection-translated_flan_coqa |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_flan_coqa/train-* |
|
- config_name: CohereForAI-aya_collection-translated_flan_gem_wiki |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_flan_gem_wiki/train-* |
|
- config_name: CohereForAI-aya_collection-translated_flan_qa |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_flan_qa/train-* |
|
- config_name: CohereForAI-aya_collection-translated_joke_explaination |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_joke_explaination/train-* |
|
- config_name: CohereForAI-aya_collection-translated_mintaka |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_mintaka/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-translated_mintaka/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_mintaka/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_mlqa |
|
data_files: |
|
- split: test |
|
path: CohereForAI-aya_collection-translated_mlqa/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_mlqa/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_nqopen |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_nqopen/train-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_nqopen/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_paws |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_paws/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-translated_paws/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_paws/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_piqa |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_piqa/train-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_piqa/validation-* |
|
- config_name: CohereForAI-aya_collection-translated_wikiqa |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_collection-translated_wikiqa/train-* |
|
- split: test |
|
path: CohereForAI-aya_collection-translated_wikiqa/test-* |
|
- split: validation |
|
path: CohereForAI-aya_collection-translated_wikiqa/validation-* |
|
- config_name: CohereForAI-aya_dataset |
|
data_files: |
|
- split: train |
|
path: CohereForAI-aya_dataset/train-* |
|
- split: test |
|
path: CohereForAI-aya_dataset/test-* |
|
- config_name: CohereForAI-aya_evaluation_suite-aya_human_annotated |
|
data_files: |
|
- split: test |
|
path: CohereForAI-aya_evaluation_suite-aya_human_annotated/test-* |
|
- config_name: CohereForAI-aya_evaluation_suite-dolly_human_edited |
|
data_files: |
|
- split: test |
|
path: CohereForAI-aya_evaluation_suite-dolly_human_edited/test-* |
|
- config_name: CohereForAI-aya_evaluation_suite-dolly_machine_translated |
|
data_files: |
|
- split: test |
|
path: CohereForAI-aya_evaluation_suite-dolly_machine_translated/test-* |
|
--- |
|
# Dataset Card for Arabic Aya (2A)
|
|
|
<!-- Provide a quick summary of the dataset. --> |
|
|
|
<!-- This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).--> |
|
|
|
## **Arabic Aya (2A): A Curated Subset of the Aya Collection for Arabic Language Processing**
|
|
|
### Dataset Sources & Infos |
|
- **Data Origin**: Derived from 69 subsets of the original Aya datasets : [CohereForAI/aya_collection](https://huggingface.co/datasets/CohereForAI/aya_collection), [CohereForAI/aya_dataset](https://huggingface.co/datasets/CohereForAI/aya_dataset), and [CohereForAI/aya_evaluation_suite](https://huggingface.co/datasets/CohereForAI/aya_evaluation_suite). |
|
- **Languages**: Modern Standard Arabic (MSA) and a variety of Arabic dialects ( 'arb', 'arz', 'ary', 'ars', 'knc', 'acm', 'apc', 'aeb', 'ajp', 'acq' ) |
|
- **Applications**: `Language Modeling`, `Text Classification`, `Sentiment Analysis`, `Dialect Identification`, `Translation` |
|
- **Paper:** [2402.06619](https://huggingface.co/papers/2402.06619) |
|
- **Maintainer:** [Elfilali Ali](https://huggingface.co/Ali-C137) |
|
- **License:** Apache-2.0 |
|
|
|
### Overview |
|
`Arabic Aya` is a meticulously curated dataset derived from the comprehensive Aya collection by [CohereForAI](https://huggingface.co/CohereForAI), specifically focusing on Arabic text data. This dataset aggregates content from the [CohereForAI/aya_collection](https://huggingface.co/datasets/CohereForAI/aya_collection), [CohereForAI/aya_dataset](https://huggingface.co/datasets/CohereForAI/aya_dataset), and [CohereForAI/aya_evaluation_suite](https://huggingface.co/datasets/CohereForAI/aya_evaluation_suite), filtering out all but the Arabic content, including both Modern Standard Arabic (MSA) and various regional dialects. |
|
|
|
### Purpose |
|
The aim of 'Arabic Aya' is to provide researchers, technologists, and linguists with a ready-to-use Arabic text resource, significantly reducing the time and effort required for data preprocessing in NLP and AI projects focused on the Arabic language. |
|
- Use the Aya datasets out of the box for your Arabic applications and research 😀 |
|
|
|
### Usage |
|
This dataset serves as a foundational tool for those embarking on Arabic language projects, from academic research to commercial applications. By providing a pre-filtered source of Arabic text, 'Arabic Aya' enables users to dive straight into model training, analysis, and application development without the preliminary hassle of data cleaning and language filtering. |
|
|
|
#### Use with HuggingFace's datasets library |
|
To load this dataset with Datasets, you'll need to install Datasets as `pip install datasets --upgrade` and then use code similar to the following:
|
|
|
```python |
|
from datasets import load_dataset |
|
|
|
dataset = load_dataset("2A2I/Arabic_Aya", "CohereForAI-aya_collection-templated_mintaka") |
|
``` |
|
In the above code snippet, "CohereForAI-aya_collection-templated_mintaka" refers to the Arabic version (100k rows) of the original "templated_mintaka" subset (780k rows) of the aya_collection. You can load other subsets by specifying their names at the time of loading the dataset.
|
|
|
|
|
### Access and Contribution |
|
Available on the Hugging Face Hub under [2A2I/Arabic_Aya](https://huggingface.co/datasets/2A2I/Arabic_Aya), 'Arabic Aya' invites contributions from the community. Users are encouraged to offer feedback and suggest improvements.
|
|
|
### Support and Collaboration |
|
We are committed to fostering an inclusive and supportive environment around Arabic AI and NLP research. For support, collaboration, or queries regarding the dataset, please reach out through the Hugging Face Hub's discussion section or via the [2A2I contact email](mailto:arabic.ai.initiative@gmail.com).
|
|
|
|
|
|
|
|
|
# Original Dataset Card of Aya by CohereForAI |
|
|
|
|
|
![Aya Header](https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/aya_header.png) |
|
|
|
# Dataset Summary |
|
The Aya Collection is a massive multilingual collection consisting of 513 million instances of prompts and completions covering a wide range of tasks. |
|
This collection incorporates instruction-style templates from fluent speakers and applies them to a curated list of datasets, as well as translations of instruction-style datasets into 101 languages. Aya Dataset, a human-curated multilingual instruction and response dataset, is also part of this collection. See our paper for more details regarding the collection. |
|
|
|
- **Curated by:** Contributors of [Aya Open Science Initiative](https://cohere.com/research/aya)
|
- **Language(s):** 115 languages |
|
- **License:** [Apache 2.0](https://opensource.org/license/apache-2-0) |
|
|
|
- **Aya Datasets Family:** |
|
| Name | Explanation | |
|
|------|--------------| |
|
| [aya_dataset](https://huggingface.co/datasets/CohereForAI/aya_dataset) | Human-annotated multilingual instruction finetuning dataset, comprising over 204K instances across 65 languages. | |
|
| [aya_collection](https://huggingface.co/datasets/CohereForAI/aya_collection) | Created by applying instruction-style templates from fluent speakers to 44 datasets, including translations of 19 instruction-style datasets into 101 languages.| |
|
| [aya_evaluation_suite](https://huggingface.co/datasets/CohereForAI/aya_evaluation_suite) | A diverse evaluation set for multilingual open-ended generation, featuring 250 culturally grounded prompts in 7 languages, 200 translated prompts in 24 languages, and human-edited versions selected for cross-cultural relevance from English Dolly in 6 languages.| |
|
|
|
|
|
# Dataset |
|
The `Aya Collection` is a comprehensive, large corpus of datasets that can be used by researchers around the world to train multilingual models. Our goal is only to include datasets with permissive licensing for manipulation and redistribution. |
|
|
|
The `Aya Collection` consists of three different sources of data: |
|
|
|
1. Templated data: We collaborated with fluent speakers to create templates that allowed for the automatic expansion of existing datasets into various languages. |
|
2. Translated data: We translated a hand-selected subset of 19 datasets into 101 languages (114 dialects) using the NLLB 3.3B parameter machine translation model. |
|
3. Aya Dataset: We release the [Aya Dataset](https://huggingface.co/datasets/CohereForAI/aya_dataset) as a subset of the overall collection. This is the only dataset in the collection that is human-annotated in its entirety. |
|
|
|
## Load with Datasets |
|
To load this dataset with Datasets, you'll need to install Datasets as `pip install datasets --upgrade` and then use the following code: |
|
|
|
```python |
|
from datasets import load_dataset |
|
|
|
dataset = load_dataset("CohereForAI/aya_collection", "templated_mintaka") |
|
``` |
|
In the above code snippet, "templated_mintaka" refers to a subset of the aya_collection. You can load other subsets by specifying the subset's name at the time of loading the dataset. |
|
|
|
## Data Instances |
|
An example of a `train` instance looks as follows: |
|
```python |
|
{'id': 246001, |
|
'inputs': 'The following query in English is taken from the geography category. What could be the answer to the question?\nWhat is the seventh tallest mountain in North America?', |
|
'targets': 'The answer is Mount Lucania.', |
|
'dataset_name': 'Mintaka-inst', |
|
'sub_dataset_name': '-', |
|
'task_type': 'question-answering', |
|
'template_id': 3, |
|
'language': 'eng', |
|
'split': 'train', |
|
'script': 'Latn' |
|
} |
|
``` |
|
|
|
## Data Fields |
|
The data fields are the same among all splits: |
|
- `id:` Unique id of the data point |
|
- `inputs:` Prompt or input to the language model. |
|
- `targets:` Completion or output of the language model. |
|
- `dataset_name:` The name of the source dataset that the data point was taken from |
|
- `sub_dataset_name:` If the source is a collection, this field indicates which part of that collection the data point was taken from. If it is not a collection, this field is left blank. |
|
- `task_type:` The task type that this conversation belongs to. |
|
- `template_id`: The id of the template applied to this data point. |
|
- `language:` The ISO code of the dialect of the conversation. |
|
- `script:` The script of the language. |
|
- `split:` Indicates whether the data point is part of the `train` or the `test` split. |
|
|
|
|
|
|
|
### Statistics |
|
The total number of data points, including the `Aya Dataset`, is 513,758,189. To view the breakdown of dialect codes and the respective templated and translated data point counts in the Aya Collection, refer to the toggled table below. |
|
|
|
<details> |
|
<summary> <b> Breakdown of Aya Collection data point counts grouped by dialects </b> </summary> |
|
|
|
|dialect code|language|translated data point count|templated data point count|total count | |
|
|------------|--------|---------------------------|--------------------------|---------------| |
|
|ace |Achinese|8240684 |2000 |8242684 | |
|
|acm |Arabic |4120342 |0 |4120342 | |
|
|acq |Arabic |4120342 |0 |4120342 | |
|
|aeb |Arabic |4120342 |0 |4120342 | |
|
|afr |Afrikaans|4120342 |6108 |4126450 | |
|
|ajp |Arabic |4120342 |0 |4120342 | |
|
|als |Albanian|4120342 |0 |4120342 | |
|
|amh |Amharic |4120342 |25327 |4145669 | |
|
|apc |Arabic |4120342 |0 |4120342 | |
|
|arb |Arabic |6424999 |216430 |6641429 | |
|
|ars |Arabic |4120342 |0 |4120342 | |
|
|ary |Arabic |4120342 |18076 |4138418 | |
|
|arz |Arabic |4120342 |0 |4120342 | |
|
|azb |Azerbaijani|4120342 |0 |4120342 | |
|
|azj |Azerbaijani|4120342 |0 |4120342 | |
|
|bel |Belarusian|4120342 |21273 |4141615 | |
|
|ben |Bengali |4120342 |30661 |4151003 | |
|
|bjn |Banjar |8240684 |2000 |8242684 | |
|
|bul |Bulgarian|4120342 |37722 |4158064 | |
|
|cat |Catalan |4120342 |66900 |4187242 | |
|
|ceb |Cebuano |4120342 |0 |4120342 | |
|
|ces |Czech |4120342 |179604 |4299946 | |
|
|ckb |Kurdish |4120342 |0 |4120342 | |
|
|cym |Welsh |4120342 |0 |4120342 | |
|
|dan |Danish |4120342 |36310 |4156652 | |
|
|deu |German |4120342 |1326722 |5447064 | |
|
|ell |Greek |4120342 |40291 |4160633 | |
|
|eng |English |9771427 |8066678 |17838105 | |
|
|epo |Esperanto|4120342 |0 |4120342 | |
|
|est |Estonian|4120342 |0 |4120342 | |
|
|eus |Basque |4120342 |0 |4120342 | |
|
|fin |Finnish |4120342 |457895 |4578237 | |
|
|fra |French |4120342 |835520 |4955862 | |
|
|gla |Scottish Gaelic|4120342 |0 |4120342 | |
|
|gle |Irish |4120342 |0 |4120342 | |
|
|glg |Galician|4120342 |0 |4120342 | |
|
|guj |Gujarati|4120342 |2157 |4122499 | |
|
|hat |Haitian Creole|4120342 |0 |4120342 | |
|
|hau |Hausa |4120342 |51396 |4171738 | |
|
|heb |Hebrew |4120342 |103466 |4223808 | |
|
|hin |Hindi |4120342 |260387 |4380729 | |
|
|hun |Hungarian|4120342 |82039 |4202381 | |
|
|hye |Armenian|4120342 |7080 |4127422 | |
|
|ibo |Igbo |4120342 |36312 |4156654 | |
|
|ind |Indonesian|4120342 |45709 |4166051 | |
|
|isl |Icelandic|4120342 |0 |4120342 | |
|
|ita |Italian |4120342 |405682 |4526024 | |
|
|jav |Javanese|4120342 |829 |4121171 | |
|
|jpn |Japanese|4120342 |2693177 |6813519 | |
|
|kan |Kannada |4120342 |1156 |4121498 | |
|
|kas |Kashmiri|4120342 |0 |4120342 | |
|
|kat |Georgian|4120342 |0 |4120342 | |
|
|kaz |Kazakh |4120342 |0 |4120342 | |
|
|khk |Mongolian|4120342 |0 |4120342 | |
|
|khm |Khmer |4120342 |0 |4120342 | |
|
|kir |Kyrgyz |4120342 |0 |4120342 | |
|
|kmr |Kurdish |4120342 |0 |4120342 | |
|
|knc |Kanuri |8240684 |0 |8240684 | |
|
|kor |Korean |4120342 |41011 |4161353 | |
|
|lao |Lao |4120342 |0 |4120342 | |
|
|lit |Lithuanian|4120342 |0 |4120342 | |
|
|ltz |Luxembourgish|4120342 |0 |4120342 | |
|
|lvs |Latvian |4120342 |0 |4120342 | |
|
|mal |Malayalam|4120342 |4347 |4124689 | |
|
|mar |Marathi |4120342 |3678 |4124020 | |
|
|min |Minangkabau|6753788 |2000 |6755788 | |
|
|mkd |Macedonian|4120342 |0 |4120342 | |
|
|mlt |Maltese |4120342 |0 |4120342 | |
|
|mni |Manipuri|4120342 |0 |4120342 | |
|
|mri |Maori |4120342 |0 |4120342 | |
|
|mya |Burmese |4120342 |0 |4120342 | |
|
|nld |Dutch |4120342 |220181 |4340523 | |
|
|nno |Norwegian|4120342 |0 |4120342 | |
|
|nob |Norwegian|4120342 |0 |4120342 | |
|
|npi |Nepali |4120342 |0 |4120342 | |
|
|nso |Northern Sotho|4120342 |0 |4120342 | |
|
|pbt |Pashto |4120342 |0 |4120342 | |
|
|pes |Persian |4120342 |245520 |4365862 | |
|
|plt |Malagasy|4120342 |0 |4120342 | |
|
|pol |Polish |4120342 |332503 |4452845 | |
|
|por |Portuguese|4120342 |287432 |4407774 | |
|
|ron |Romanian|4120342 |36359 |4156701 | |
|
|rus |Russian |4120342 |545920 |4666262 | |
|
|sin |Sinhala |4120342 |195 |4120537 | |
|
|slk |Slovak |4120342 |27845 |4148187 | |
|
|slv |Slovenian|4120342 |25731 |4146073 | |
|
|smo |Samoan |4120342 |0 |4120342 | |
|
|sna |Shona |4120342 |3684 |4124026 | |
|
|snd |Sindhi |4120342 |0 |4120342 | |
|
|som |Somali |4120342 |2926 |4123268 | |
|
|sot |Southern Sotho|4120342 |0 |4120342 | |
|
|spa |Spanish |4120342 |379194 |4499536 | |
|
|srp |Serbian |4120342 |77124 |4197466 | |
|
|sun |Sundanese|4120342 |2208 |4122550 | |
|
|swe |Swedish |4120342 |76486 |4196828 | |
|
|swh |Swahili |4120342 |12726 |4133068 | |
|
|tam |Tamil |4120342 |11462 |4131804 | |
|
|taq |Tamasheq|4120342 |0 |4120342 | |
|
|tel |Telugu |4120342 |477821 |4598163 | |
|
|tgk |Tajik |4120342 |0 |4120342 | |
|
|tha |Thai |4120342 |2125180 |6245522 | |
|
|tur |Turkish |4120342 |59932 |4180274 | |
|
|ukr |Ukrainian|4120342 |189384 |4309726 | |
|
|urd |Urdu |4120342 |337739 |4458081 | |
|
|uzn |Uzbek |4120342 |0 |4120342 | |
|
|vie |Vietnamese|4120342 |42232 |4162574 | |
|
|xho |Xhosa |4120342 |2952 |4123294 | |
|
|ydd |Yiddish |4120342 |0 |4120342 | |
|
|yor |Yoruba |4120342 |4907 |4125249 | |
|
|yue |Chinese |4120342 |0 |4120342 | |
|
|zho-Hans |Chinese |4120342 |54528 |4174870 | |
|
|zho-Hant |Chinese |4120342 |0 |4120342 | |
|
|zsm |Malay |4120342 |13950 |4134292 | |
|
|zul |Zulu |4120342 |786 |4121128 | |
|
|arq |Arabic |0 |6046 |6046 | |
|
|ban |Balinese|0 |2000 |2000 | |
|
|bbc |Toba Batak|0 |2000 |2000 | |
|
|bem |Bemba |0 |776 |776 | |
|
|fil |Filipino|0 |220 |220 | |
|
|fon |Fon |0 |845 |845 | |
|
|hrv |Croatian|0 |9007 |9007 | |
|
|kin |Kinyarwanda|0 |11165 |11165 | |
|
|lij |Ligurian|0 |6409 |6409 | |
|
|mad |Madurese|0 |2000 |2000 | |
|
|nij |Ngaju |0 |2000 |2000 | |
|
|nor |Norwegian|0 |72352 |72352 | |
|
|pan |Punjabi |0 |2156 |2156 | |
|
|twi |Twi |0 |10840 |10840 | |
|
|wol |Wolof |0 |785 |785 | |
|
|zho |Chinese |0 |74972 |74972 | |
|
|
|
PS: Templated data also includes Mozambican Portuguese, which doesn't have its own ISO language code. |
|
|
|
</details> |
|
|
|
<br> |
|
|
|
|
|
# Motivations & Intentions |
|
- **Curation Rationale:** Automatic augmentation of existing datasets serves to enhance the available linguistic resources for multiple languages. The list of languages was initially established from mT5 and aligned with the annotators’ language list and NLLB translation model. The datasets were translated directly from English for all languages. |
|
|
|
|
|
# Additional Information |
|
## Provenance |
|
- **Methods Used:** A combination of crowd-sourced templating and automatic translation was employed to source this dataset. |
|
- **Methodology Details:** |
|
- *Source:* Existing NLP datasets |
|
- *Dates of Collection:* May 2023 - Dec 2023 |
|
|
|
|
|
## Dataset Version and Maintenance |
|
- **Maintenance Status:** Actively Maintained |
|
- **Version Details:** |
|
- *Current version:* 1.0 |
|
- *Last Update:* 02/2024 |
|
- *First Release:* 02/2024 |
|
|
|
|
|
## Authorship |
|
- **Publishing Organization:** [Cohere For AI](https://cohere.com/research) |
|
- **Industry Type:** Not-for-profit - Tech |
|
- **Contact Details:** https://cohere.com/research/aya |
|
|
|
|
|
## Licensing Information |
|
This dataset can be used for any purpose, whether academic or commercial, under the terms of the [Apache 2.0](https://opensource.org/license/apache-2-0) License. |
|
|
|
|
|
## Citation Information |
|
```bibtex |
|
@misc{singh2024aya, |
|
title={Aya Dataset: An Open-Access Collection for Multilingual Instruction Tuning}, |
|
author={Shivalika Singh and Freddie Vargus and Daniel Dsouza and Börje F. Karlsson and Abinaya Mahendiran and Wei-Yin Ko and Herumb Shandilya and Jay Patel and Deividas Mataciunas and Laura OMahony and Mike Zhang and Ramith Hettiarachchi and Joseph Wilson and Marina Machado and Luisa Souza Moura and Dominik Krzemiński and Hakimeh Fadaei and Irem Ergün and Ifeoma Okoh and Aisha Alaagib and Oshan Mudannayake and Zaid Alyafeai and Vu Minh Chien and Sebastian Ruder and Surya Guthikonda and Emad A. Alghamdi and Sebastian Gehrmann and Niklas Muennighoff and Max Bartolo and Julia Kreutzer and Ahmet Üstün and Marzieh Fadaee and Sara Hooker}, |
|
year={2024}, |
|
eprint={2402.06619}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CL} |
|
} |
|
|
|
``` |
|
|