yuvalkirstain committed b847b51 (parent: 4316980)

move citations and descriptions to another module

Files changed:
- citations_and_descriptions.py  +56 -0
- fs.py  +6 -67
citations_and_descriptions.py
ADDED
@@ -0,0 +1,56 @@
+_FS_CITATION = """
+TBD
+"""
+
+_FS_DESCRIPTION = """
+TBD
+"""
+
+_SUMM_SCREEN_DESCRIPTION = """
+SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
+Given a transcript of a specific episode, the goal is to produce the episode's recap.
+The original dataset is divided into two complementary subsets, based on the source of its community contributed transcripts.
+For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
+making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
+Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""
+
+_GOV_REPORT_DESCRIPTION = """
+GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
+Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
+The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
+for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""
+
+_ARXIV_DESCRIPTION = """
+"""
+
+_SUMM_SCREEN_CITATION = r"""
+@misc{chen2021summscreen,
+    title={SummScreen: A Dataset for Abstractive Screenplay Summarization},
+    author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},
+    year={2021},
+    eprint={2104.07091},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}"""
+
+_GOV_REPORT_CITATION = r"""
+@inproceedings{huang-etal-2021-efficient,
+    title = "Efficient Attentions for Long Document Summarization",
+    author = "Huang, Luyang and
+      Cao, Shuyang and
+      Parulian, Nikolaus and
+      Ji, Heng and
+      Wang, Lu",
+    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
+    month = jun,
+    year = "2021",
+    address = "Online",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.naacl-main.112",
+    doi = "10.18653/v1/2021.naacl-main.112",
+    pages = "1419--1436",
+    abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
+}"""
+
+_ARXIV_CITATION = r"""
+}"""
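For context, a minimal sketch (not part of this commit) of how constants like these are typically consumed in a Hugging Face datasets loading script: the suite-level and task-level description/citation strings are passed to datasets.DatasetInfo. The feature schema and the pairing of strings below are assumptions for illustration; the real fields live in fs.py and are not shown in this diff.

import datasets

from citations_and_descriptions import (
    _FS_DESCRIPTION, _FS_CITATION,
    _SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
)

# Hypothetical feature schema, for illustration only.
features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "source": datasets.Value("string"),
        "target": datasets.Value("string"),
    }
)

# Combine the suite-level and task-level texts, as builder _info() methods usually do.
info = datasets.DatasetInfo(
    description=_FS_DESCRIPTION + _SUMM_SCREEN_DESCRIPTION,
    citation=_SUMM_SCREEN_CITATION + "\n" + _FS_CITATION,
    features=features,
)
print(info.description)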
fs.py
CHANGED
@@ -4,70 +4,13 @@
 
 import json
 import os
-from abc import abstractmethod
-
 import datasets
-from
-
-
-
-
-
-
-_FS_DESCRIPTION = """
-TBD
-"""
-
-_SUMM_SCREEN_DESCRIPTION = """
-SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
-Given a transcript of a specific episode, the goal is to produce the episode's recap.
-The original dataset is divided into two complementary subsets, based on the source of its community contributed transcripts.
-For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
-making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
-Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""
-
-_GOV_REPORT_DESCRIPTION = """
-GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
-Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
-The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
-for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""
-
-_ARXIV_DESCRIPTION = """
-"""
-
-_SUMM_SCREEN_CITATION = r"""
-@misc{chen2021summscreen,
-    title={SummScreen: A Dataset for Abstractive Screenplay Summarization},
-    author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},
-    year={2021},
-    eprint={2104.07091},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}"""
-
-_GOV_REPORT_CITATION = r"""
-@inproceedings{huang-etal-2021-efficient,
-    title = "Efficient Attentions for Long Document Summarization",
-    author = "Huang, Luyang and
-      Cao, Shuyang and
-      Parulian, Nikolaus and
-      Ji, Heng and
-      Wang, Lu",
-    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
-    month = jun,
-    year = "2021",
-    address = "Online",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/2021.naacl-main.112",
-    doi = "10.18653/v1/2021.naacl-main.112",
-    pages = "1419--1436",
-    abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
-}"""
-
-_ARXIV_CITATION = r"""
-}"""
-
-SUMM_PROMPT = "Summary: "
+from citations_and_descriptions import (
+    _SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
+    _GOV_REPORT_CITATION, _GOV_REPORT_DESCRIPTION,
+    _ARXIV_CITATION, _ARXIV_DESCRIPTION,
+    _FS_DESCRIPTION, _FS_CITATION
+)
 
 
 class FSConfig(datasets.BuilderConfig):
@@ -116,8 +59,6 @@ class FSConfig(datasets.BuilderConfig):
 class ScrollsConfig(FSConfig):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        self.prompt = SUMM_PROMPT
-
         self.train_file = "train.jsonl"
         self.validation_file = "validation.jsonl"
         self.test_file = "test.jsonl"
@@ -137,8 +78,6 @@ class ScrollsConfig(FSConfig):
 class ArxivConfig(FSConfig):
     def __init__(self, **kwargs):
        super().__init__(**kwargs)
-        self.prompt = SUMM_PROMPT
-
         self.train_file = "train.txt"
         self.validation_file = "val.txt"
         self.test_file = "test.txt"
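As a closing usage note, here is a self-contained sketch of the config pattern that fs.py keeps after this change: a base FSConfig(datasets.BuilderConfig) whose subclasses only pin their split file names, now that the prompt assignment has been removed from their constructors. Everything beyond what the diff shows, including the config names and version, is an assumption for illustration.

import datasets


class FSConfig(datasets.BuilderConfig):
    # Base config; split file names are filled in by subclasses.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.train_file = None
        self.validation_file = None
        self.test_file = None


class ScrollsConfig(FSConfig):
    # SCROLLS-style tasks ship their splits as JSON Lines files.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.train_file = "train.jsonl"
        self.validation_file = "validation.jsonl"
        self.test_file = "test.jsonl"


class ArxivConfig(FSConfig):
    # The arXiv task uses plain-text splits with shorter file names.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.train_file = "train.txt"
        self.validation_file = "val.txt"
        self.test_file = "test.txt"


# How such configs are typically registered on a DatasetBuilder (names assumed):
BUILDER_CONFIGS = [
    ScrollsConfig(name="summ_screen_fd", version=datasets.Version("1.0.0")),
    ScrollsConfig(name="gov_report", version=datasets.Version("1.0.0")),
    ArxivConfig(name="arxiv", version=datasets.Version("1.0.0")),
]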