add data loading script
- SciDuet.py +168 -0
- validation.json +0 -0
SciDuet.py
ADDED
@@ -0,0 +1,168 @@
import json

import datasets

_CITATION = """\
@inproceedings{sun-etal-2021-d2s,
    title = "{D}2{S}: Document-to-Slide Generation Via Query-Based Text Summarization",
    author = "Sun, Edward and
      Hou, Yufang and
      Wang, Dakuo and
      Zhang, Yunfeng and
      Wang, Nancy X. R.",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.111",
    doi = "10.18653/v1/2021.naacl-main.111",
    pages = "1405--1418",
}
"""

_DESCRIPTION = """\
SciDuet is the first publicly available dataset for the challenging task of document2slides generation.
The dataset integrated into GEM is the ACL portion of the whole dataset described in "https://aclanthology.org/2021.naacl-main.111.pdf".
It contains the full Dev and Test sets, and a portion of the Train dataset.
We additionally create a challenge dataset in which the slide titles do not match with the
section headers of the corresponding paper.
Note that although we cannot release the whole training dataset due to copyright issues, researchers can still
use our released data procurement code from https://github.com/IBM/document2slides
to generate the training dataset from the online ICML/NeurIPS anthologies.
In the released dataset, the original papers and slides (both are in PDF format) are carefully processed by a combination of PDF/Image processing toolkits.
The text contents from multiple slides that correspond to the same slide title are merged.
"""

_URL = "https://huggingface.co/datasets/GEM/SciDuet/"
_URLs = {
    "train": "train.json",
    "validation": "validation.json",
    "test": "test.json",
    "challenge_set": "challenge_woSectionHeader.json",
}


class SciDuetConfig(datasets.BuilderConfig):
    """BuilderConfig for SciDuet."""

    def __init__(self, **kwargs):
        """BuilderConfig for SciDuet.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SciDuetConfig, self).__init__(**kwargs)


class SciDuet(datasets.GeneratorBasedBuilder):
    VERSION_1 = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        SciDuetConfig(name="gem_data_split", version=VERSION_1, description="SciDuet - GEM version 1"),
    ]

    DEFAULT_CONFIG_NAME = "gem_data_split"

    def _info(self):

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "paper_id": datasets.Value("string"),
                    "paper_title": datasets.Value("string"),
                    "paper_abstract": datasets.Value("string"),
                    "paper_content": datasets.Sequence(
                        {
                            "paper_content_id": datasets.Value("int32"),
                            "paper_content_text": datasets.Value("string"),
                        }
                    ),
                    "paper_headers": datasets.Sequence(
                        {
                            "paper_header_number": datasets.Value("string"),
                            "paper_header_content": datasets.Value("string"),
                        }
                    ),
                    "slide_id": datasets.Value("string"),
                    "slide_title": datasets.Value("string"),
                    "slide_content_text": datasets.Value("string"),
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            homepage=_URL,
            license="Apache License 2.0",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # _URLs maps each split name to its JSON file; dl_manager accepts the
        # whole dict and returns a matching dict of local file paths.
        urls_to_download = _URLs
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "split": "test",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name="challenge_woSectionHeader",
                gen_kwargs={
                    "filepath": downloaded_files["challenge_set"],
                    "split": "challenge_woSectionHeader",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)["data"]
            for i in data:
                gem_id = data[i]["gem_id"]
                paper_id = data[i]["paper_id"]
                paper_title = data[i]["paper_title"]
                paper_abstract = data[i]["paper"]["abstract"]
                paper_content_ids = [text["id"] for text in data[i]["paper"]["text"]]
                paper_content_texts = [text["string"] for text in data[i]["paper"]["text"]]
                paper_header_numbers = [header["n"] for header in data[i]["paper"]["headers"]]
                paper_header_contents = [header["section"] for header in data[i]["paper"]["headers"]]
                # One example per slide; slides sharing a title were already
                # merged during preprocessing.
                for j in data[i]["slides"]:
                    id_ = gem_id + "#" + "paper-" + paper_id + "#" + "slide-" + str(j)
                    slide_title = data[i]["slides"][j]["title"]
                    slide_content_text = '\n'.join(data[i]["slides"][j]["text"])

                    yield id_, {
                        "gem_id": gem_id,
                        "paper_id": paper_id,
                        "paper_title": paper_title,
                        "paper_abstract": paper_abstract,
                        "paper_content": {"paper_content_id": paper_content_ids, "paper_content_text": paper_content_texts},
                        "paper_headers": {"paper_header_number": paper_header_numbers, "paper_header_content": paper_header_contents},
                        "slide_id": str(j),
                        "slide_title": slide_title,
                        "slide_content_text": slide_content_text,
                    }
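
For reference, a minimal usage sketch once this script is in place (the config and split names come from the code above; note that recent `datasets` releases require trust_remote_code=True for script-based datasets, and 3.x removes script loading entirely):

import datasets

# Load all four splits defined in _split_generators.
dataset = datasets.load_dataset("GEM/SciDuet", "gem_data_split")

print(dataset["validation"][0]["paper_title"])
print(dataset["challenge_woSectionHeader"][0]["slide_title"])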
validation.json
CHANGED
The diff for this file is too large to render.
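Since the diff is not rendered, the sketch below shows the entry layout these JSON files are expected to follow, inferred solely from the key lookups in _generate_examples; every concrete value is an illustrative placeholder:

# Hypothetical entry shape (placeholders only), reverse-engineered from
# the parsing code in _generate_examples.
expected_entry = {
    "data": {
        "0": {
            "gem_id": "...",
            "paper_id": "...",
            "paper_title": "...",
            "paper": {
                "abstract": "...",
                "text": [{"id": 0, "string": "paragraph text"}],
                "headers": [{"n": "1", "section": "Introduction"}],
            },
            "slides": {
                "0": {"title": "slide title", "text": ["bullet 1", "bullet 2"]},
            },
        },
    },
}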