w4ngatang committed
Commit f1a089f
1 Parent(s): 2476581

update data card

Files changed (1)
  1. squality.json +154 -0
squality.json CHANGED
@@ -0,0 +1,154 @@
+ {
+   "overview": {
+     "what": {},
+     "where": {
+       "has-leaderboard": "no",
+       "leaderboard-url": "N/A",
+       "leaderboard-description": "N/A",
+       "website": "https://github.com/nyu-mll/SQuALITY",
+       "data-url": "https://github.com/nyu-mll/SQuALITY/data",
+       "paper-url": "https://arxiv.org/abs/2205.11465",
+       "paper-bibtext": "@article{wang2022squality,\n title={S{Q}u{ALITY}: Building a Long-Document Summarization Dataset the Hard Way},\n author={Wang, Alex and Pang, Richard Yuanzhe and Chen, Angelica and Phang, Jason and Bowman, Samuel R.},\n journal={arXiv preprint 2205.11465},\n year={2022}\n}",
+       "contact-name": "Alex Wang",
+       "contact-email": "wangalexc@gmail.com"
+     },
+     "languages": {
+       "is-multilingual": "no",
+       "license": "cc-by-4.0: Creative Commons Attribution 4.0 International",
+       "task-other": "N/A",
+       "language-names": [
+         "English"
+       ],
+       "intended-use": "summarization research",
+       "license-other": "N/A",
+       "task": "Summarization",
+       "communicative": "Given a question about a particular high-level aspect of a short story, provide a summary about that aspect of the story (e.g., plot, character relationships, setting, theme, etc.)."
+     },
+     "credit": {
+       "organization-type": [
+         "academic"
+       ],
+       "organization-names": "New York University",
+       "creators": "Alex Wang (NYU); Angelica Chen (NYU); Richard Yuanzhe Pang (NYU); Nitish Joshi (NYU); Samuel R. Bowman (NYU)",
+       "funding": "Eric and Wendy Schmidt; Apple; NSF",
+       "gem-added-by": "Alex Wang (NYU)"
+     },
+     "structure": {}
+   },
+   "context": {
+     "previous": {
+       "is-deployed": "no",
+       "described-risks": "N/A",
+       "changes-from-observation": "N/A"
+     },
+     "underserved": {
+       "helps-underserved": "no",
+       "underserved-description": "N/A"
+     },
+     "biases": {
+       "has-biases": "yes"
+     }
+   },
+   "considerations": {
+     "pii": {},
+     "licenses": {
+       "dataset-restrictions-other": "N/A",
+       "data-copyright-other": "N/A",
+       "dataset-restrictions": [
+         "open license - commercial use allowed"
+       ],
+       "data-copyright": [
+         "public domain"
+       ]
+     },
+     "limitations": {}
+   },
+   "results": {
+     "results": {
+       "other-metrics-definitions": "N/A",
+       "has-previous-results": "yes",
+       "current-evaluation": "Human evaluation",
+       "previous-results": "See paper (https://arxiv.org/abs/2205.11465)",
+       "metrics": [
+         "ROUGE",
+         "BERT-Score"
+       ],
+       "original-evaluation": "Following norms in summarization, we have evaluated with automatic evaluation metrics like ROUGE and BERTScore, but these metrics do not correlate with human judgments of summary quality when comparing model summaries (see paper for details).\n\nWe highly recommend that users of the benchmark use human evaluation as the primary method for evaluating systems. We present one such evaluation in the paper, in which we ask Upwork workers to read the short story and then rate sets of three responses to each question. While this is close to the gold standard for how we would want to evaluate systems on this task, we recognize that finding workers who will read the whole story (~30 minutes) is difficult and expensive, and that efficient human evaluation for long-document tasks is an open problem."
+     }
+   },
+   "gem": {
+     "rationale": {
+       "sole-task-dataset": "yes",
+       "sole-language-task-dataset": "no",
+       "distinction-description": "The inputs (story-question pairs) are multi-reference. The questions are high-level and are written to draw from multiple parts of the story, rather than a single section.",
+       "contribution": "The summaries in the dataset were crowdsourced, allowing us to use input documents that are easily understood by crowdworkers (as opposed to technical domains, such as scientific papers). Additionally, there is no lede bias in the stories, as is typical of the news articles used in benchmark summarization datasets like CNN/DM and XSum.\n\nAdditionally, the dataset is multi-reference and the references for each task are highly diverse. Having a diverse set of references better represents the set of acceptable summaries for an input, and opens the door for creative evaluation methodologies using these multiple references."
+     },
+     "curation": {
+       "has-additional-curation": "no",
+       "modification-types": [],
+       "modification-description": "N/A",
+       "has-additional-splits": "no",
+       "additional-splits-description": "N/A",
+       "additional-splits-capacicites": "N/A"
+     },
+     "starting": {
+       "research-pointers": "* original paper: https://arxiv.org/abs/2205.11465\n* modeling question-focused summarization: https://arxiv.org/abs/2112.07637\n* similar task format but different domain: https://arxiv.org/abs/2104.05938\n"
+     }
+   },
+   "curation": {
+     "original": {
+       "is-aggregated": "no",
+       "aggregated-sources": "N/A"
+     },
+     "language": {
+       "found": [],
+       "crowdsourced": [
+         "Other crowdworker platform"
+       ],
+       "created": "N/A",
+       "machine-generated": "N/A",
+       "validated": "validated by crowdworker",
+       "is-filtered": "not filtered",
+       "filtered-criteria": "N/A",
+       "obtained": [
+         "Crowdsourced"
+       ],
+       "topics": "The short stories are primarily science fiction and date from the 1930s to the 1970s.",
+       "producers-description": "Upwork: US-born, native English speakers with backgrounds in the humanities and copywriting\n\nNYU undergraduates: English-fluent undergraduates from a diverse set of nationalities and majors\n\n"
+     },
+     "annotations": {
+       "origin": "crowd-sourced",
+       "rater-number": "11<n<50",
+       "rater-qualifications": "English-fluent, with experience reading and writing about literature",
+       "rater-training-num": "4",
+       "rater-test-num": "4",
+       "rater-annotation-service-bool": "no",
+       "rater-annotation-service": [],
+       "values": "N/A",
+       "quality-control": "validated by another rater",
+       "quality-control-details": "Each response was reviewed by three reviewers, who ranked the response (against two other responses), highlighted errors in the response, and provided feedback to the original response writer."
+     },
+     "consent": {
+       "has-consent": "yes",
+       "consent-policy": "Writers were informed that their writing and reviewing would be used in the development of AI.",
+       "consent-other": "N/A",
+       "no-consent-justification": "N/A"
+     },
+     "pii": {
+       "has-pii": "unlikely",
+       "no-pii-justification": "N/A",
+       "is-pii-identified": "no identification",
+       "pii-identified-method": "N/A",
+       "is-pii-replaced": "N/A",
+       "pii-replaced-method": "N/A"
+     },
+     "maintenance": {
+       "has-maintenance": "no",
+       "description": "N/A",
+       "contact": "N/A",
+       "contestation-mechanism": "N/A",
+       "contestation-link": "N/A",
+       "contestation-description": "N/A"
+     }
+   }
+ }
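
The card above lists ROUGE and BERT-Score as the automatic metrics and emphasizes that each input has multiple, diverse references. As a rough illustration of how such multi-reference scoring can be run, here is a minimal Python sketch; it assumes the dataset is loadable from the Hub as `GEM/squality` with a `validation` split and that each example exposes `target` and `references` fields, none of which is guaranteed by this card.

```python
# Minimal sketch of multi-reference ROUGE scoring for SQuALITY-style outputs.
# Assumptions (not stated in the card above): the dataset loads from the Hub
# as "GEM/squality", has a "validation" split, and each example carries a
# "target" string plus a "references" list of gold responses. Adjust to the
# files at https://github.com/nyu-mll/SQuALITY/data if these differ.
from datasets import load_dataset
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)

def multi_ref_rouge(prediction: str, references: list[str]) -> dict[str, float]:
    """Score a prediction against every reference and keep the best F1 per
    metric, a common convention for multi-reference summarization."""
    best: dict[str, float] = {}
    for ref in references:
        scores = scorer.score(ref, prediction)  # rouge_score expects (target, prediction)
        for name, value in scores.items():
            best[name] = max(best.get(name, 0.0), value.fmeasure)
    return best

validation = load_dataset("GEM/squality", split="validation")  # hypothetical split name
example = validation[0]
# Here the gold "target" stands in for a model output, so scores will be high.
print(multi_ref_rouge(example["target"], example["references"]))
```

As the `original-evaluation` field stresses, these automatic scores did not track human judgments of summary quality in the original paper, so they are best treated as a sanity check rather than the primary evaluation.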