---
annotations_creators:
  - crowdsourced
language_creators:
  - crowdsourced
language:
  - en
license: odc-by
multilinguality:
  - monolingual
size_categories:
  - 1K<n<10K
source_datasets:
  - original
task_categories:
  - question-answering
task_ids:
  - multiple-choice-qa
paperswithcode_id: codah
pretty_name: COmmonsense Dataset Adversarially-authored by Humans
dataset_info:
  - config_name: codah
    features:
      - name: id
        dtype: int32
      - name: question_category
        dtype:
          class_label:
            names:
              '0': Idioms
              '1': Reference
              '2': Polysemy
              '3': Negation
              '4': Quantitative
              '5': Others
      - name: question_propmt
        dtype: string
      - name: candidate_answers
        sequence: string
      - name: correct_answer_idx
        dtype: int32
    splits:
      - name: train
        num_bytes: 571196
        num_examples: 2776
    download_size: 352902
    dataset_size: 571196
  - config_name: fold_0
    features:
      - name: id
        dtype: int32
      - name: question_category
        dtype:
          class_label:
            names:
              '0': Idioms
              '1': Reference
              '2': Polysemy
              '3': Negation
              '4': Quantitative
              '5': Others
      - name: question_propmt
        dtype: string
      - name: candidate_answers
        sequence: string
      - name: correct_answer_idx
        dtype: int32
    splits:
      - name: train
        num_bytes: 344900
        num_examples: 1665
      - name: validation
        num_bytes: 114199
        num_examples: 556
      - name: test
        num_bytes: 112097
        num_examples: 555
    download_size: 379179
    dataset_size: 571196
  - config_name: fold_1
    features:
      - name: id
        dtype: int32
      - name: question_category
        dtype:
          class_label:
            names:
              '0': Idioms
              '1': Reference
              '2': Polysemy
              '3': Negation
              '4': Quantitative
              '5': Others
      - name: question_propmt
        dtype: string
      - name: candidate_answers
        sequence: string
      - name: correct_answer_idx
        dtype: int32
    splits:
      - name: train
        num_bytes: 340978
        num_examples: 1665
      - name: validation
        num_bytes: 114199
        num_examples: 556
      - name: test
        num_bytes: 116019
        num_examples: 555
    download_size: 379728
    dataset_size: 571196
  - config_name: fold_2
    features:
      - name: id
        dtype: int32
      - name: question_category
        dtype:
          class_label:
            names:
              '0': Idioms
              '1': Reference
              '2': Polysemy
              '3': Negation
              '4': Quantitative
              '5': Others
      - name: question_propmt
        dtype: string
      - name: candidate_answers
        sequence: string
      - name: correct_answer_idx
        dtype: int32
    splits:
      - name: train
        num_bytes: 342281
        num_examples: 1665
      - name: validation
        num_bytes: 114199
        num_examples: 556
      - name: test
        num_bytes: 114716
        num_examples: 555
    download_size: 379126
    dataset_size: 571196
  - config_name: fold_3
    features:
      - name: id
        dtype: int32
      - name: question_category
        dtype:
          class_label:
            names:
              '0': Idioms
              '1': Reference
              '2': Polysemy
              '3': Negation
              '4': Quantitative
              '5': Others
      - name: question_propmt
        dtype: string
      - name: candidate_answers
        sequence: string
      - name: correct_answer_idx
        dtype: int32
    splits:
      - name: train
        num_bytes: 342832
        num_examples: 1665
      - name: validation
        num_bytes: 114199
        num_examples: 556
      - name: test
        num_bytes: 114165
        num_examples: 555
    download_size: 379178
    dataset_size: 571196
  - config_name: fold_4
    features:
      - name: id
        dtype: int32
      - name: question_category
        dtype:
          class_label:
            names:
              '0': Idioms
              '1': Reference
              '2': Polysemy
              '3': Negation
              '4': Quantitative
              '5': Others
      - name: question_propmt
        dtype: string
      - name: candidate_answers
        sequence: string
      - name: correct_answer_idx
        dtype: int32
    splits:
      - name: train
        num_bytes: 342832
        num_examples: 1665
      - name: validation
        num_bytes: 114165
        num_examples: 555
      - name: test
        num_bytes: 114199
        num_examples: 556
    download_size: 379178
    dataset_size: 571196
configs:
  - config_name: codah
    data_files:
      - split: train
        path: codah/train-*
  - config_name: fold_0
    data_files:
      - split: train
        path: fold_0/train-*
      - split: validation
        path: fold_0/validation-*
      - split: test
        path: fold_0/test-*
  - config_name: fold_1
    data_files:
      - split: train
        path: fold_1/train-*
      - split: validation
        path: fold_1/validation-*
      - split: test
        path: fold_1/test-*
  - config_name: fold_2
    data_files:
      - split: train
        path: fold_2/train-*
      - split: validation
        path: fold_2/validation-*
      - split: test
        path: fold_2/test-*
  - config_name: fold_3
    data_files:
      - split: train
        path: fold_3/train-*
      - split: validation
        path: fold_3/validation-*
      - split: test
        path: fold_3/test-*
  - config_name: fold_4
    data_files:
      - split: train
        path: fold_4/train-*
      - split: validation
        path: fold_4/validation-*
      - split: test
        path: fold_4/test-*
---

# Dataset Card for COmmonsense Dataset Adversarially-authored by Humans

## Table of Contents

- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

### Dataset Summary

The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions.
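
The dataset can be loaded with the Hugging Face `datasets` library. A minimal sketch, assuming the dataset is available on the Hub under the `codah` identifier; the configuration names (`codah` plus `fold_0` through `fold_4`) come from the metadata above:

```python
from datasets import load_dataset

# Full question set: a single "train" split with all examples.
codah = load_dataset("codah", "codah")

# One of the five cross-validation folds, split into train/validation/test.
fold_0 = load_dataset("codah", "fold_0")

print(codah)
# Each example carries the fields listed in the metadata above:
# id, question_category, question_propmt, candidate_answers, correct_answer_idx
print(fold_0["test"][0])
```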

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]
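
The split layout can be read off the metadata above: the `codah` configuration exposes all 2,776 examples as a single `train` split, while each `fold_i` configuration partitions the same examples into `train`/`validation`/`test` splits of roughly 1,665/556/555 examples. A small sketch for checking this, again assuming the `codah` Hub identifier:

```python
from datasets import load_dataset

# Print the number of examples per split for every configuration.
for name in ["codah"] + [f"fold_{i}" for i in range(5)]:
    ds = load_dataset("codah", name)
    print(name, {split: ds[split].num_rows for split in ds})
# Expected from the metadata: codah -> {'train': 2776};
# each fold -> roughly {'train': 1665, 'validation': 556, 'test': 555}.
```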

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

[More Information Needed]

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

[More Information Needed]

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

The CODAH dataset is made available under the Open Data Commons Attribution License: http://opendatacommons.org/licenses/by/1.0/

### Citation Information

@inproceedings{chen-etal-2019-codah,
    title = "{CODAH}: An Adversarially-Authored Question Answering Dataset for Common Sense",
    author = "Chen, Michael  and
      D{'}Arcy, Mike  and
      Liu, Alisa  and
      Fernandez, Jared  and
      Downey, Doug",
    editor = "Rogers, Anna  and
      Drozd, Aleksandr  and
      Rumshisky, Anna  and
      Goldberg, Yoav",
    booktitle = "Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for {NLP}",
    month = jun,
    year = "2019",
    address = "Minneapolis, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-2008",
    doi = "10.18653/v1/W19-2008",
    pages = "63--69",
    abstract = "Commonsense reasoning is a critical AI capability, but it is difficult to construct challenging datasets that test common sense. Recent neural question answering systems, based on large pre-trained models of language, have already achieved near-human-level performance on commonsense knowledge benchmarks. These systems do not possess human-level common sense, but are able to exploit limitations of the datasets to achieve human-level scores. We introduce the CODAH dataset, an adversarially-constructed evaluation dataset for testing common sense. CODAH forms a challenging extension to the recently-proposed SWAG dataset, which tests commonsense knowledge using sentence-completion questions that describe situations observed in video. To produce a more difficult dataset, we introduce a novel procedure for question acquisition in which workers author questions designed to target weaknesses of state-of-the-art neural question answering systems. Workers are rewarded for submissions that models fail to answer correctly both before and after fine-tuning (in cross-validation). We create 2.8k questions via this procedure and evaluate the performance of multiple state-of-the-art question answering systems on our dataset. We observe a significant gap between human performance, which is 95.3{\%}, and the performance of the best baseline accuracy of 65.3{\%} by the OpenAI GPT model.",
}

### Contributions

Thanks to @patil-suraj for adding this dataset.