Datasets:
GEM
/

Tasks:
Other
Languages:
English
Multilinguality:
unknown
Size Categories:
unknown
Language Creators:
unknown
Annotations Creators:
none
Source Datasets:
original
ArXiv:
Tags:
reasoning
License:
Sebastian Gehrmann committed on
Commit
d897657
1 Parent(s): 0f5243a
Files changed (1) hide show
  1. common_gen.json +10 -7
common_gen.json CHANGED
@@ -2,12 +2,12 @@
2
  "overview": {
3
  "where": {
4
  "has-leaderboard": "yes",
5
- "leaderboard-url": "https://inklab.usc.edu/CommonGen/leaderboard.html",
6
  "leaderboard-description": "The model outputs are evaluated against the crowdsourced references, and ranked by SPICE score. The leaderboard also reports BLEU-4 and CIDEr scores.",
7
- "website": "ttps://inklab.usc.edu/CommonGen/",
8
- "data-url": "https://github.com/INK-USC/CommonGen",
9
- "paper-url": "https://aclanthology.org/2020.findings-emnlp.165",
10
- "paper-bibtext": "@inproceedings{lin-etal-2020-commongen,\n title = \"{C}ommon{G}en: A Constrained Text Generation Challenge for Generative Commonsense Reasoning\",\n author = \"Lin, Bill Yuchen and\n Zhou, Wangchunshu and\n Shen, Ming and\n Zhou, Pei and\n Bhagavatula, Chandra and\n Choi, Yejin and\n Ren, Xiang\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.165\",\n pages = \"1823--1840\",\n}",
11
  "contact-email": "yuchen.lin@usc.edu",
12
  "contact-name": "Bill Yuchen Lin"
13
  },
@@ -36,13 +36,16 @@
36
  "gem-added-by": "Yacine Jernite created the initial data card. It was later extended by Simon Mille. Sebastian Gehrmann migrated it to the GEMv2 format. "
37
  },
38
  "structure": {
39
- "data-fields": "A data instance has the following fields\n* `concepts`: a `list` of `string` values denoting the concept the system should write about. Has 3 to 5 items, constitutes the `input` of the task.\n* `target`: a sentence `string` mentioning all of the above mentioned `concepts`. Constitutes the desired `output` of the task.\n",
40
  "structure-splits": "Each example in the dataset consists of a set of 3 to 5 concepts denoted by a single noun, verb, or adjective (the input), and a sentence using these concepts (the output). The dataset provides several such sentences for each such concept.\n\n| | Train | Dev | Test |\n|---------------------------|--------|-------|-------|\n| **Total concept-sets** | 32,651 | 993 | 1,497 |\n| **Total sentences** | 67,389 | 4,018 | 6,042 |\n|**Average sentence length**| 10.54 | 11.55 | 13.34 |\n\n",
41
- "structure-example": "[\n {\n \"concepts\": ['ski', 'mountain', 'skier'],\n \"target\": 'Skier skis down the mountain',\n },\n {\n \"concepts\": ['ski', 'mountain', 'skier'],\n \"target\": 'Three skiers are skiing on a snowy mountain.',\n },\n]",
42
  "structure-splits-criteria": "The dev and test set were created by sampling sets of concepts of size 4 or 5 (and as many of size 3 for the dev set) present in the source captioning datasets and having crowd-workers write reference sentences using these concepts.\n\nConversely, the training set has more concept sets of size 3 than of size 4 and 5, and uses the original captions from the source datasets as references.\n\nThe authors also ensured that the training, dev and test set have different combinations of unique concepts to ensure compositionality (details in [Table 1](https://arxiv.org/pdf/1911.03705v3.pdf)).",
43
  "structure-outlier": "n/a",
44
  "structure-labels": "n/a",
45
  "structure-description": "n/a"
 
 
 
46
  }
47
  },
48
  "curation": {
 
2
  "overview": {
3
  "where": {
4
  "has-leaderboard": "yes",
5
+ "leaderboard-url": "[Link](https://inklab.usc.edu/CommonGen/leaderboard.html)",
6
  "leaderboard-description": "The model outputs are evaluated against the crowdsourced references, and ranked by SPICE score. The leaderboard also reports BLEU-4 and CIDEr scores.",
7
+ "website": "[link](https://inklab.usc.edu/CommonGen/)",
8
+ "data-url": "[Link](https://github.com/INK-USC/CommonGen)",
9
+ "paper-url": "[Link](https://aclanthology.org/2020.findings-emnlp.165)",
10
+ "paper-bibtext": "```\n@inproceedings{lin-etal-2020-commongen,\n title = \"{C}ommon{G}en: A Constrained Text Generation Challenge for Generative Commonsense Reasoning\",\n author = \"Lin, Bill Yuchen and\n Zhou, Wangchunshu and\n Shen, Ming and\n Zhou, Pei and\n Bhagavatula, Chandra and\n Choi, Yejin and\n Ren, Xiang\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.165\",\n pages = \"1823--1840\",\n}\n```",
11
  "contact-email": "yuchen.lin@usc.edu",
12
  "contact-name": "Bill Yuchen Lin"
13
  },
 
36
  "gem-added-by": "Yacine Jernite created the initial data card. It was later extended by Simon Mille. Sebastian Gehrmann migrated it to the GEMv2 format. "
37
  },
38
  "structure": {
39
+ "data-fields": "A data instance has the following fields:\n\n- `concepts`: a `list` of `string` values denoting the concept the system should write about. Has 3 to 5 items, constitutes the `input` of the task.\n- `target`: a sentence `string` mentioning all of the above mentioned `concepts`. Constitutes the desired `output` of the task.\n",
40
  "structure-splits": "Each example in the dataset consists of a set of 3 to 5 concepts denoted by a single noun, verb, or adjective (the input), and a sentence using these concepts (the output). The dataset provides several such sentences for each such concept.\n\n| | Train | Dev | Test |\n|---------------------------|--------|-------|-------|\n| **Total concept-sets** | 32,651 | 993 | 1,497 |\n| **Total sentences** | 67,389 | 4,018 | 6,042 |\n|**Average sentence length**| 10.54 | 11.55 | 13.34 |\n\n",
41
+ "structure-example": "```\n[\n {\n \"concepts\": ['ski', 'mountain', 'skier'],\n \"target\": 'Skier skis down the mountain',\n },\n {\n \"concepts\": ['ski', 'mountain', 'skier'],\n \"target\": 'Three skiers are skiing on a snowy mountain.',\n },\n]\n```",
42
  "structure-splits-criteria": "The dev and test set were created by sampling sets of concepts of size 4 or 5 (and as many of size 3 for the dev set) present in the source captioning datasets and having crowd-workers write reference sentences using these concepts.\n\nConversely, the training set has more concept sets of size 3 than of size 4 and 5, and uses the original captions from the source datasets as references.\n\nThe authors also ensured that the training, dev and test set have different combinations of unique concepts to ensure compositionality (details in [Table 1](https://arxiv.org/pdf/1911.03705v3.pdf)).",
43
  "structure-outlier": "n/a",
44
  "structure-labels": "n/a",
45
  "structure-description": "n/a"
46
+ },
47
+ "what": {
48
+ "dataset": "CommonGen is an English text generation task to explicitly test machines for the ability of generative commonsense reasoning. Given a set of common concepts, the task is to generate a coherent sentence describing an everyday scenario using these concepts. CommonGen is challenging because it inherently requires 1) relational reasoning using background commonsense knowledge, and 2) compositional generalization ability to work on unseen concept combinations. The dataset, constructed through a combination of crowd-sourcing from AMT and existing caption corpora, consists of 30k concept-sets and 50k sentences in total. Note that the CommonGen test set is private and requires submission to the external leaderboard."
49
  }
50
  },
51
  "curation": {