{ "overview": { "where": { "has-leaderboard": "no", "leaderboard-url": "N/A", "leaderboard-description": "N/A", "website": "[Website](http://abductivecommonsense.xyz/)", "data-url": "[Google Storage](https://storage.googleapis.com/ai2-mosaic/public/abductive-commonsense-reasoning-iclr2020/anlg.zip)", "paper-url": "[OpenReview](https://openreview.net/pdf?id=Byg1v1HKDB)", "paper-bibtext": "```\n@inproceedings{\nBhagavatula2020Abductive,\ntitle={Abductive Commonsense Reasoning},\nauthor={Chandra Bhagavatula and Ronan Le Bras and Chaitanya Malaviya and Keisuke Sakaguchi and Ari Holtzman and Hannah Rashkin and Doug Downey and Wen-tau Yih and Yejin Choi},\nbooktitle={International Conference on Learning Representations},\nyear={2020},\nurl={https://openreview.net/forum?id=Byg1v1HKDB}\n}\n```", "contact-name": "Chandra Bhagavatulla", "contact-email": "chandrab@allenai.org" }, "languages": { "is-multilingual": "no", "license": "apache-2.0: Apache License 2.0", "task-other": "N/A", "language-names": [ "English" ], "language-speakers": "Crowdworkers on the Amazon Mechanical Turk platform based in the U.S, Canada, U.K and Australia. ", "intended-use": "To study the viability of language-based abductive reasoning. Training and evaluating models to generate a plausible hypothesis to explain two given observations.", "license-other": "N/A", "task": "Reasoning" }, "credit": { "organization-type": [ "industry" ], "organization-names": "Allen Institute for AI", "creators": "Chandra Bhagavatula (AI2), Ronan Le Bras (AI2), Chaitanya Malaviya (AI2), Keisuke Sakaguchi (AI2), Ari Holtzman (AI2, UW), Hannah Rashkin (AI2, UW), Doug Downey (AI2), Wen-tau Yih (AI2), Yejin Choi (AI2, UW)", "funding": "Allen Institute for AI", "gem-added-by": "Chandra Bhagavatula (AI2), Ronan LeBras (AI2), Aman Madaan (CMU), Nico Daheim (RWTH Aachen University)" }, "structure": { "data-fields": "- `observation_1`: A string describing an observation / event.\n- `observation_2`: A string describing an observation / event.\n- `label`: A string that plausibly explains why observation_1 and observation_2 might have happened.", "structure-labels": "Explanations were authored by crowdworkers on the Amazon Mechanical Turk platform using a custom template designed by the creators of the dataset.", "structure-example": "```\n{\n'gem_id': 'GEM-ART-validation-0',\n'observation_1': 'Stephen was at a party.',\n'observation_2': 'He checked it but it was completely broken.',\n'label': 'Stephen knocked over a vase while drunk.'\n}\n```", "structure-splits": "- `train`: Consists of training instances. \n- `dev`: Consists of dev instances.\n- `test`: Consists of test instances.\n" }, "what": { "dataset": "Abductive reasoning is inference to the most plausible explanation. For example, if Jenny finds her house in a mess when she returns from work, and remembers that she left a window open, she can hypothesize that a thief broke into her house and caused the mess, as the most plausible explanation.\nThis data loader focuses on abductive NLG: a conditional English generation task for explaining given observations in natural language. " } }, "gem": { "rationale": { "contribution": "Abductive reasoning is a crucial capability of humans and ART is the first dataset curated to study language-based abductive reasoning.", "sole-task-dataset": "no", "distinction-description": "N/A", "model-ability": "Whether models can reason abductively about a given pair of observations." 
}, "curation": { "has-additional-curation": "no", "modification-types": [], "modification-description": "N/A", "has-additional-splits": "no", "additional-splits-description": "N/A", "additional-splits-capacicites": "N/A" }, "starting": { "research-pointers": "- [Paper](https://arxiv.org/abs/1908.05739)\n- [Code](https://github.com/allenai/abductive-commonsense-reasoning)" } }, "results": { "results": { "model-abilities": "Whether models can reason abductively about a given pair of observations.", "metrics": [ "BLEU", "BERT-Score", "ROUGE" ], "other-metrics-definitions": "N/A", "has-previous-results": "no", "current-evaluation": "N/A", "previous-results": "N/A" } }, "curation": { "original": { "is-aggregated": "no", "aggregated-sources": "N/A" }, "language": { "obtained": [ "Crowdsourced" ], "found": [], "crowdsourced": [ "Amazon Mechanical Turk" ], "created": "N/A", "machine-generated": "N/A", "producers-description": "Language producers were English speakers in U.S., Canada, U.K and Australia.", "topics": "No", "validated": "validated by crowdworker", "pre-processed": "N/A", "is-filtered": "algorithmically", "filtered-criteria": "Adversarial filtering algorithm as described in the [paper](https://arxiv.org/abs/1908.05739)" }, "annotations": { "origin": "automatically created", "rater-number": "N/A", "rater-qualifications": "N/A", "rater-training-num": "N/A", "rater-test-num": "N/A", "rater-annotation-service-bool": "no", "rater-annotation-service": [], "values": "Each observation is associated with a list of COMET (https://arxiv.org/abs/1906.05317) inferences.", "quality-control": "none", "quality-control-details": "N/A" }, "consent": { "has-consent": "no", "consent-policy": "N/A", "consent-other": "N/A" }, "pii": { "has-pii": "no PII", "no-pii-justification": "The dataset contains day-to-day events. It does not contain names, emails, addresses etc. ", "pii-categories": [], "is-pii-identified": "N/A", "pii-identified-method": "N/A", "is-pii-replaced": "N/A", "pii-replaced-method": "N/A" }, "maintenance": { "has-maintenance": "no", "description": "N/A", "contact": "N/A", "contestation-mechanism": "N/A", "contestation-link": "N/A", "contestation-description": "N/A" } }, "context": { "previous": { "is-deployed": "no", "described-risks": "N/A", "changes-from-observation": "N/A" }, "underserved": { "helps-underserved": "no", "underserved-description": "N/A" }, "biases": { "has-biases": "no", "bias-analyses": "N/A" } }, "considerations": { "pii": { "risks-description": "None" }, "licenses": { "dataset-restrictions": [ "public domain" ], "dataset-restrictions-other": "N/A", "data-copyright": [ "public domain" ], "data-copyright-other": "N/A" }, "limitations": {} } }