system (HF staff) committed
Commit 458be61
0 Parent(s)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
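
The patterns above route large binary artifacts (archives, model weights, Arrow files, etc.) through Git LFS rather than storing them directly in the repository. As a minimal sketch, not part of this commit, the snippet below parses a .gitattributes file like the one added here and lists which glob patterns are marked for LFS; the file path is an assumption.

```python
# Sketch: list the glob patterns a .gitattributes file routes through Git LFS.
# Assumes a .gitattributes formatted like the one above; the path is hypothetical.
from pathlib import Path


def lfs_patterns(gitattributes_path=".gitattributes"):
    patterns = []
    for line in Path(gitattributes_path).read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        parts = line.split()
        pattern, attrs = parts[0], parts[1:]
        if "filter=lfs" in attrs:
            patterns.append(pattern)
    return patterns


print(lfs_patterns())  # e.g. ['*.7z', '*.arrow', '*.bin', ...]
```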
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"arxiv": {"description": "\nScientific papers datasets contains two sets of long and structured documents.\nThe datasets are obtained from ArXiv and PubMed OpenAccess repositories.\n\nBoth \"arxiv\" and \"pubmed\" have two features:\n - article: the body of the document, pagragraphs seperated by \"/n\".\n - abstract: the abstract of the document, pagragraphs seperated by \"/n\".\n - section_names: titles of sections, seperated by \"/n\".\n\n", "citation": "\n@article{Cohan_2018,\n title={A Discourse-Aware Attention Model for Abstractive Summarization of\n Long Documents},\n url={http://dx.doi.org/10.18653/v1/n18-2097},\n DOI={10.18653/v1/n18-2097},\n journal={Proceedings of the 2018 Conference of the North American Chapter of\n the Association for Computational Linguistics: Human Language\n Technologies, Volume 2 (Short Papers)},\n publisher={Association for Computational Linguistics},\n author={Cohan, Arman and Dernoncourt, Franck and Kim, Doo Soon and Bui, Trung and Kim, Seokhwan and Chang, Walter and Goharian, Nazli},\n year={2018}\n}\n", "homepage": "https://github.com/armancohan/long-summarization", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "abstract": {"dtype": "string", "id": null, "_type": "Value"}, "section_names": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scientific_papers", "config_name": "arxiv", "version": {"version_str": "1.1.1", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"test": {"name": "test", "num_bytes": 217518181, "num_examples": 6440, "dataset_name": "scientific_papers"}, "train": {"name": "train", "num_bytes": 7148443320, "num_examples": 203037, "dataset_name": "scientific_papers"}, "validation": {"name": "validation", "num_bytes": 217128744, "num_examples": 6436, "dataset_name": "scientific_papers"}}, "download_checksums": {"https://drive.google.com/uc?id=1b3rmCSIoh6VhD4HKWjI4HOW-cSwcwbeC&export=download": {"num_bytes": 3624420843, "checksum": "82ed30dd7c66a6497eeb3d7c3090c274e9e32c012438f8e0bb3cce3e6c1fcada"}, "https://drive.google.com/uc?id=1lvsqvsFi3W-pE1SqNZI0s8NR9rC1tsja&export=download": {"num_bytes": 880225504, "checksum": "d424074726a5e29e20bf834055fe7efe90f8a37bce0a2b512e4ab7e487013c04"}}, "download_size": 4504646347, "dataset_size": 7583090245, "size_in_bytes": 12087736592}, "pubmed": {"description": "\nScientific papers datasets contains two sets of long and structured documents.\nThe datasets are obtained from ArXiv and PubMed OpenAccess repositories.\n\nBoth \"arxiv\" and \"pubmed\" have two features:\n - article: the body of the document, pagragraphs seperated by \"/n\".\n - abstract: the abstract of the document, pagragraphs seperated by \"/n\".\n - section_names: titles of sections, seperated by \"/n\".\n\n", "citation": "\n@article{Cohan_2018,\n title={A Discourse-Aware Attention Model for Abstractive Summarization of\n Long Documents},\n url={http://dx.doi.org/10.18653/v1/n18-2097},\n DOI={10.18653/v1/n18-2097},\n journal={Proceedings of the 2018 Conference of the North American Chapter of\n the Association for Computational Linguistics: Human Language\n Technologies, Volume 2 (Short Papers)},\n publisher={Association for Computational Linguistics},\n author={Cohan, Arman and Dernoncourt, Franck and Kim, Doo Soon and Bui, Trung and Kim, Seokhwan and Chang, Walter and Goharian, Nazli},\n year={2018}\n}\n", "homepage": "https://github.com/armancohan/long-summarization", "license": "", 
"features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "abstract": {"dtype": "string", "id": null, "_type": "Value"}, "section_names": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scientific_papers", "config_name": "pubmed", "version": {"version_str": "1.1.1", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"test": {"name": "test", "num_bytes": 127187780, "num_examples": 6658, "dataset_name": "scientific_papers"}, "train": {"name": "train", "num_bytes": 2252087227, "num_examples": 119924, "dataset_name": "scientific_papers"}, "validation": {"name": "validation", "num_bytes": 127406718, "num_examples": 6633, "dataset_name": "scientific_papers"}}, "download_checksums": {"https://drive.google.com/uc?id=1b3rmCSIoh6VhD4HKWjI4HOW-cSwcwbeC&export=download": {"num_bytes": 3624420843, "checksum": "82ed30dd7c66a6497eeb3d7c3090c274e9e32c012438f8e0bb3cce3e6c1fcada"}, "https://drive.google.com/uc?id=1lvsqvsFi3W-pE1SqNZI0s8NR9rC1tsja&export=download": {"num_bytes": 880225504, "checksum": "d424074726a5e29e20bf834055fe7efe90f8a37bce0a2b512e4ab7e487013c04"}}, "download_size": 4504646347, "dataset_size": 2506681725, "size_in_bytes": 7011328072}}
dummy/arxiv/1.1.1/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdb4ffacf8d2d0950f715aae4702c00c20c8ef2edc16dbfd99be80343804a701
+ size 3497
dummy/pubmed/1.1.1/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2c8b90389b4948b07a51da1ec147b47eb40f2227b88e64fbd47d92d1b468fd1
+ size 3520
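
Both dummy_data.zip entries are committed as Git LFS pointer files: three lines giving the LFS spec version, the SHA-256 of the real content, and its size in bytes. A small sketch of parsing such a pointer; the path is just the one shown above and the helper name is hypothetical:

```python
# Sketch: parse a Git LFS pointer file into its version/oid/size fields.
# Pointer files look exactly like the three "+" lines above.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields


pointer = parse_lfs_pointer("dummy/pubmed/1.1.1/dummy_data.zip")
print(pointer["oid"], pointer["size"])  # sha256:c2c8..., 3520
```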
scientific_papers.py ADDED
@@ -0,0 +1,139 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Scientific Papers Dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @article{Cohan_2018,
+ title={A Discourse-Aware Attention Model for Abstractive Summarization of
+ Long Documents},
+ url={http://dx.doi.org/10.18653/v1/n18-2097},
+ DOI={10.18653/v1/n18-2097},
+ journal={Proceedings of the 2018 Conference of the North American Chapter of
+ the Association for Computational Linguistics: Human Language
+ Technologies, Volume 2 (Short Papers)},
+ publisher={Association for Computational Linguistics},
+ author={Cohan, Arman and Dernoncourt, Franck and Kim, Doo Soon and Bui, Trung and Kim, Seokhwan and Chang, Walter and Goharian, Nazli},
+ year={2018}
+ }
+ """
+
+ _DESCRIPTION = """
+ Scientific papers datasets contain two sets of long and structured documents.
+ The datasets are obtained from ArXiv and PubMed OpenAccess repositories.
+
+ Both "arxiv" and "pubmed" have the following features:
+ - article: the body of the document, paragraphs separated by "/n".
+ - abstract: the abstract of the document, paragraphs separated by "/n".
+ - section_names: titles of sections, separated by "/n".
+
+ """
+
+ _DOCUMENT = "article"
+ _SUMMARY = "abstract"
+
+ _URLS = {
+     "arxiv": "https://drive.google.com/uc?id=1b3rmCSIoh6VhD4HKWjI4HOW-cSwcwbeC&export=download",
+     "pubmed": "https://drive.google.com/uc?id=1lvsqvsFi3W-pE1SqNZI0s8NR9rC1tsja&export=download",
+ }
+
+
+ class ScientificPapersConfig(datasets.BuilderConfig):
+     """BuilderConfig for Scientific Papers."""
+
+     def __init__(self, filename=None, **kwargs):
+         """BuilderConfig for ScientificPapers.
+
+         Args:
+             filename: filename of different configs for the dataset.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # Version 1.1.0 removes the sentence breakers <S> and </S> in the summary.
+         super(ScientificPapersConfig, self).__init__(version=datasets.Version("1.1.1"), **kwargs)
+         self.filename = filename
+
+
+ class ScientificPapers(datasets.GeneratorBasedBuilder):
+     """Scientific Papers."""
+
+     BUILDER_CONFIGS = [
+         ScientificPapersConfig(name="pubmed", description="Documents from PubMed repository."),
+         ScientificPapersConfig(name="arxiv", description="Documents from ArXiv repository."),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _DOCUMENT: datasets.Value("string"),
+                     _SUMMARY: datasets.Value("string"),
+                     "section_names": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/armancohan/long-summarization",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_paths = dl_manager.download_and_extract(_URLS)
+         path = os.path.join(dl_paths[self.config.name], self.config.name + "-dataset")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"path": os.path.join(path, "train.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"path": os.path.join(path, "val.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"path": os.path.join(path, "test.txt")},
+             ),
+         ]
+
+     def _generate_examples(self, path=None):
+         """Yields examples."""
+         with open(path, encoding="utf-8") as f:
+             for line in f:
+                 # Possible keys are:
+                 # "article_id": str
+                 # "article_text": list[str], article (list of paragraphs).
+                 # "abstract_text": list[str], abstract (list of paragraphs).
+                 # "section_names": list[str], list of section names.
+                 # "sections": list[list[str]], list of sections (list of paragraphs).
+                 d = json.loads(line)
+                 summary = "\n".join(d["abstract_text"])
+                 # In the original paper, <S> and </S> are not used in the vocab during
+                 # training or during decoding:
+                 # https://github.com/armancohan/long-summarization/blob/master/data.py#L27
+                 summary = summary.replace("<S>", "").replace("</S>", "")
+                 yield d["article_id"], {
+                     _DOCUMENT: "\n".join(d["article_text"]),
+                     _SUMMARY: summary,
+                     "section_names": "\n".join(d["section_names"]),
+                 }
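
With this script in place, the dataset loads through the standard `datasets` API. A brief usage sketch; the split/index access below is illustrative, and the first call downloads roughly 4.5 GB per the sizes in dataset_infos.json above:

```python
# Sketch: load the "arxiv" config of scientific_papers and peek at one record.
from datasets import load_dataset

dataset = load_dataset("scientific_papers", "arxiv")
example = dataset["train"][0]
print(example["section_names"])   # newline-separated section titles
print(example["abstract"][:200])  # first 200 characters of the abstract
print(len(example["article"]))    # length of the full article body
```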