eladsegal committed on
Commit 2cdb235
1 Parent(s): 21ec5fe

Update scrolls.py

Files changed (1)
  1. scrolls.py +45 -31
scrolls.py CHANGED
@@ -1,9 +1,6 @@
  # coding=utf-8
- # TODO License
-
  # Lint as: python3
- """The Scrolls benchmark."""
-

  import json
  import os
@@ -12,47 +9,70 @@ import datasets
  _SCROLLS_CITATION = """
  @article{ TODO citation here
  }
- Note that each Scrolls dataset has its own citation. Please see the source to
  get the correct citation for each contained dataset.
  """

  _SCROLLS_DESCRIPTION = """
- Scrolls (https://TODO.com/) is a benchmark of generative tasks that require processing long texts.
  """

  _SUMM_SCREEN_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""


  _QASPER_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""


  _QMSUM_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""
-

  _NARRATIVE_QA_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""


  _GOV_REPORT_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""


  _CONTRACT_NLI_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""


  _QUALITY_DESCRIPTION = """
- SummScreen (Boolean Questions, Clark et al., 2019a) is a summarization task where one should output the recap of a TV
- show episode, given the full script."""


  _SUMM_SCREEN_CITATION = r"""
@@ -160,10 +180,10 @@ _QUALITY_CITATION = """\


  class ScrollsConfig(datasets.BuilderConfig):
-     """BuilderConfig for Scrolls."""

      def __init__(self, features, data_url, citation, url, **kwargs):
-         """BuilderConfig for Scrolls.
          Args:
              features: `list[string]`, list of the features that will appear in the
                  feature dict. Should not include "label".
@@ -175,13 +195,7 @@ class ScrollsConfig(datasets.BuilderConfig):
                  'False' or 'True'.
              **kwargs: keyword arguments forwarded to super.
          """
-         # Version history:
-         # 1.0.2: Fixed non-nondeterminism in ReCoRD.
-         # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
-         #        the full release (v2.0).
-         # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
-         # 0.0.2: Initial version.
-         super(ScrollsConfig, self).__init__(version=datasets.Version("1.0.2"), **kwargs)
          self.features = features
          self.data_url = data_url
          self.citation = citation
@@ -195,7 +209,7 @@ class QualityConfig(ScrollsConfig):


  class Scrolls(datasets.GeneratorBasedBuilder):
-     """The SuperGLUE benchmark."""

      features = ["id", "pid", "input", "output"]
      DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
 
  # coding=utf-8
  # Lint as: python3
+ """The SCROLLS benchmark."""

  import json
  import os

  _SCROLLS_CITATION = """
  @article{ TODO citation here
  }
+ Note that each SCROLLS dataset has its own citation. Please see the source to
  get the correct citation for each contained dataset.
  """

  _SCROLLS_DESCRIPTION = """
+ SCROLLS: Standardized CompaRison Over Long Language Sequences.
+ A suite of natural language datasets that require reasoning over long texts.
+ https://scrolls-benchmark.com/
  """

  _SUMM_SCREEN_DESCRIPTION = """
+ SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
+ Given a transcript of a specific episode, the goal is to produce the episode's recap.
+ The original dataset is divided into two complementary subsets, based on the source of its community contributed transcripts.
+ For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
+ making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
+ Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""


  _QASPER_DESCRIPTION = """
+ Qasper (Dasigi et al., 2021) is a question answering dataset over NLP papers filtered from the Semantic Scholar Open Research Corpus (S2ORC).
+ Questions were written by NLP practitioners after reading only the title and abstract of the papers,
+ while another set of NLP practitioners annotated the answers given the entire document.
+ Qasper contains abstractive, extractive, and yes/no questions, as well as unanswerable ones."""


  _QMSUM_DESCRIPTION = """
+ QMSum (Zhong et al., 2021) is a query-based summarization dataset, consisting of 232 meeting transcripts from multiple domains.
+ The corpus covers academic group meetings at the International Computer Science Institute and their summaries, industrial product meetings for designing a remote control,
+ and committee meetings of the Welsh and Canadian Parliaments, dealing with a variety of public policy issues.
+ Annotators were tasked with writing queries about the broad contents of the meetings, as well as specific questions about certain topics or decisions,
+ while ensuring that the relevant text for answering each query spans at least 200 words or 10 turns."""

  _NARRATIVE_QA_DESCRIPTION = """
+ NarrativeQA (Kočiský et al., 2018) is an established question answering dataset over entire books from Project Gutenberg and movie scripts from different websites.
+ Annotators were given summaries of the books and scripts obtained from Wikipedia, and asked to generate question-answer pairs,
+ resulting in about 30 questions and answers for each of the 1,567 books and scripts.
+ They were encouraged to use their own words rather than copying, and to avoid asking yes/no questions or ones about the cast.
+ Each question was then answered by an additional annotator, providing each question with two reference answers (unless both answers are identical)."""


  _GOV_REPORT_DESCRIPTION = """
+ GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
+ Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
+ The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
+ for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in arXiv and PubMed, respectively."""


  _CONTRACT_NLI_DESCRIPTION = """
+ Contract NLI (Koreeda and Manning, 2021) is a natural language inference dataset in the legal domain.
+ Given a non-disclosure agreement (the premise), the task is to predict whether a particular legal statement (the hypothesis) is entailed, not entailed (neutral), or cannot be entailed (contradiction) from the contract.
+ The NDAs were manually picked after simple filtering from the Electronic Data Gathering, Analysis, and Retrieval system (EDGAR) and Google.
+ The dataset contains a total of 607 contracts and 17 unique hypotheses, which were combined to produce the dataset's 10,319 examples."""


  _QUALITY_DESCRIPTION = """
+ QuALITY (Pang et al., 2021) is a multiple-choice question answering dataset over articles and stories sourced from Project Gutenberg,
+ the Open American National Corpus, and more.
+ Experienced writers wrote questions and distractors, and were incentivized to write answerable, unambiguous questions such that in order to correctly answer them,
+ human annotators must read large portions of the given document.
+ Reference answers were then calculated using the majority vote of the annotators' and writer's answers.
+ To measure the difficulty of their questions, Pang et al. conducted a speed validation process,
+ where another set of annotators were asked to answer questions given only a short period of time to skim through the document.
+ As a result, 50% of the questions in QuALITY are labeled as hard, i.e. the majority of the annotators in the speed validation setting chose the wrong answer."""


  _SUMM_SCREEN_CITATION = r"""
 


  class ScrollsConfig(datasets.BuilderConfig):
+     """BuilderConfig for SCROLLS."""

      def __init__(self, features, data_url, citation, url, **kwargs):
+         """BuilderConfig for SCROLLS.
          Args:
              features: `list[string]`, list of the features that will appear in the
                  feature dict. Should not include "label".

                  'False' or 'True'.
              **kwargs: keyword arguments forwarded to super.
          """
+         super(ScrollsConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
          self.features = features
          self.data_url = data_url
          self.citation = citation
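
For orientation, here is a minimal sketch of how one SCROLLS subset could be registered through this config class. The subset name, data_url, and homepage below are illustrative placeholders, not values taken from this commit; only the feature list and the module-level description/citation constants come from the file itself.

# Hypothetical sketch only: name, data_url, and url are placeholder values.
summ_screen_config = ScrollsConfig(
    name="summ_screen_fd",  # assumed config name for the SummScreenFD subset
    description=_SUMM_SCREEN_DESCRIPTION,
    features=["id", "pid", "input", "output"],  # features declared by the Scrolls builder
    data_url="https://example.com/summ_screen_fd.zip",  # placeholder download URL
    citation=_SUMM_SCREEN_CITATION,
    url="https://example.com/summ_screen",  # placeholder homepage
)
# Extra BuilderConfig kwargs such as name and description are forwarded to super();
# the version is pinned to 1.0.0 inside ScrollsConfig.__init__.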
 


  class Scrolls(datasets.GeneratorBasedBuilder):
+     """The SCROLLS benchmark."""

      features = ["id", "pid", "input", "output"]
      DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
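
As a usage sketch (not part of the commit), the builder yields flat examples with exactly the four features listed above. The config name and local script path below are assumptions for illustration.

# Hypothetical usage sketch; "summ_screen_fd" and the script path are assumed.
from datasets import load_dataset

scrolls = load_dataset("./scrolls.py", "summ_screen_fd")
example = scrolls["train"][0]
print(example["id"], example["pid"])   # example identifiers
print(example["input"][:200])          # long input text, e.g. a full transcript
print(example["output"])               # target text, e.g. the episode recap
# DEFAULT_WRITER_BATCH_SIZE = 1000 bounds how many generated examples are
# buffered before being flushed to Arrow, which keeps memory in check when
# individual examples are very long (NarrativeQA inputs are entire books).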